In this group project, you are going to build a 3D Conv model that will be able to predict the 5 gestures correctly.
- Trisit Kumar Chatterjee
- Subhasis Jethy
Imagine you are working as a data scientist at a home electronics company which manufactures state of the art smart televisions. You want to develop a cool feature in the smart-TV that can recognise five different gestures performed by the user which will help users control the TV without using a remote.
The gestures are continuously monitored by the webcam mounted on the TV. Each gesture corresponds to a specific command
# # Mount the drive to google colab notebook
# from google.colab import drive
# drive.mount('/content/drive')
Mounted at /content/drive
# import important libraries
import numpy as np
import pandas as pd
import os
#from scipy.misc import imread, imresize
from skimage.io import imread
from skimage.transform import resize as imresize
import cv2
import matplotlib.pyplot as plt
import datetime
# Import all the required Keras modules
from keras.models import Sequential, Model, load_model
from keras.layers import GlobalAveragePooling2D, Input, Flatten, BatchNormalization, Activation, Dropout
from tensorflow.keras import regularizers
from keras.layers import Dense, GRU, Flatten, TimeDistributed
from keras.layers.convolutional import Conv3D, MaxPooling3D, Conv2D, MaxPooling2D
from keras.layers.recurrent import LSTM
from keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, EarlyStopping
from keras import optimizers
from keras.regularizers import l2
from keras.applications.vgg16 import VGG16
#from keras.applications.resnet50 import ResNet50
from keras.applications.mobilenet import MobileNet
from tensorflow.keras.layers import LeakyReLU
import warnings
warnings.filterwarnings("ignore", category=DeprecationWarning)
We set the random seed so that the results don't vary drastically.
np.random.seed(30)
import random as rn
rn.seed(30)
from keras import backend as K
import tensorflow as tf
tf.random.set_seed(30) # Corrected
print(tf.__version__)
print(rn.randint(1,9999))
2.3.1 8833
In this block, you read the folder names for training and validation. You also set the batch_size here. Note that you set the batch size in such a way that you are able to use the GPU in full capacity. You keep increasing the batch size until the machine throws an error.
train_doc = np.random.permutation(open('/datasets/Project_data/train.csv').readlines())
val_doc = np.random.permutation(open('/datasets/Project_data/val.csv').readlines())
batch_size = 32 #experiment with the batch size
train_path = '/datasets/Project_data/train' # To be used as source_path in generator function
val_path = '/datasets/Project_data/val'
# Create a separate data frame of train data for EDA
train_df = pd.read_csv('/datasets/Project_data/train.csv',header = None)
train_df = train_df[0].str.split(pat = ';',expand = True)
train_df.rename(columns = {0: 'Name', 1: 'Gesture', 2: 'Class'},inplace = True)
train_df.sample(n=5, random_state=30)
| Name | Gesture | Class | |
|---|---|---|---|
| 152 | WIN_20180926_16_54_08_Pro_Right_Swipe_new | Right_Swipe_new | 1 |
| 314 | WIN_20180925_18_02_58_Pro_Thumbs_Down_new | Thumbs_Down_new | 3 |
| 12 | WIN_20180925_17_33_08_Pro_Left_Swipe_new | Left_Swipe_new | 0 |
| 413 | WIN_20180925_17_51_17_Pro_Thumbs_Up_new | Thumbs_Up_new | 4 |
| 74 | WIN_20180926_17_17_35_Pro_Left_Swipe_new | Left_Swipe_new | 0 |
# Check for class imbalance
train_df['Class'].value_counts()
1 137 3 137 0 136 2 130 4 123 Name: Class, dtype: int64
# Let us see images are of which shape. It will help us in reshaping all of them to the same size
shape_120_160 = 0
shape_360_360 = 0
shape_rest = 0
for folder in train_df['Name']:
    folder_path = '/datasets/Project_data/train' + '/' + folder
    temp = os.listdir(folder_path)
    # Read the sample frame ONCE and reuse its shape (the original read the
    # same file from disk twice, once per comparison).
    sample_shape = imread(folder_path + '/' + temp[2]).shape
    if sample_shape == (120, 160, 3):
        shape_120_160 += 1
    elif sample_shape == (360, 360, 3):
        shape_360_360 += 1
    else:
        shape_rest += 1
print('Number of images with shape 120x160x3:', shape_120_160)
print('Number of images with shape 360x360x3:', shape_360_360)
print('Number of images with other shapes :', shape_rest)
Number of images with shape 120x160x3: 479 Number of images with shape 360x360x3: 184 Number of images with other shapes : 0
# This is a custom function to show the images and their augmentation
def imageview(class_img, source_path):
    """Display one frame of a gesture video alongside its augmented variants.

    Shows five panels: the original frame, a 120x120 resize, a Gaussian blur,
    Canny edges, and a 0-1 normalized copy.

    Parameters
    ----------
    class_img : str
        Name of the video folder (one row of train_df['Name']).
    source_path : str
        Directory that contains the video folders.
    """
    # Sort the listing so the "25th image" is deterministic — os.listdir
    # returns entries in arbitrary order.
    images = sorted(os.listdir(source_path + '/' + class_img))
    # Select one image (the 25th) among the 30 frames of the video
    image = imread(source_path + '/' + class_img + '/' + images[24])
    print('original:', image.shape)
    # Resize the image (cv2.resize takes dsize as (width, height))
    resized_img = cv2.resize(image, (120, 120), interpolation=cv2.INTER_AREA)
    print('resized:', resized_img.shape)
    # Apply Gaussian Blur on the image
    blur_image = cv2.GaussianBlur(resized_img, (3, 3), 3)
    print('blurred:', blur_image.shape)
    # Image Edge Detection
    edge_image = cv2.Canny(blur_image, 75, 150)
    # Canny drops the channel axis; restore 120x120x3 for a consistent display shape
    edge_image1 = cv2.cvtColor(edge_image, cv2.COLOR_GRAY2BGR)
    print('edge:', edge_image1.shape)
    # Normalize the image to the 0-1 range
    norm_img = resized_img / 255
    print('normalized:', norm_img.shape)
    plt.figure(figsize=(20, 30))
    plt.subplot(151)
    plt.title('Original')
    plt.imshow(image)
    plt.subplot(152)
    plt.title('Resized')
    plt.imshow(resized_img)
    plt.subplot(153)
    plt.title('Gaussian Blur')
    plt.imshow(blur_image)
    plt.subplot(154)
    plt.title('Edges')
    plt.imshow(edge_image1, cmap='gray')
    plt.subplot(155)
    plt.title('Normalized')
    plt.imshow(norm_img)
    plt.show()
# Consider two images (one with 120x160x3 and another with 360x360x3) each from every class for analysis
class_0_120 = train_df[train_df['Class']=='0'].reset_index()['Name'][2]
class_0_360 = train_df[train_df['Class']=='0'].reset_index()['Name'][133]
class_1_120 = train_df[train_df['Class']=='1'].reset_index()['Name'][0]
class_1_360 = train_df[train_df['Class']=='1'].reset_index()['Name'][131]
class_2_120 = train_df[train_df['Class']=='2'].reset_index()['Name'][1]
class_2_360 = train_df[train_df['Class']=='2'].reset_index()['Name'][121]
class_3_120 = train_df[train_df['Class']=='3'].reset_index()['Name'][1]
class_3_360 = train_df[train_df['Class']=='3'].reset_index()['Name'][130]
class_4_120 = train_df[train_df['Class']=='4'].reset_index()['Name'][5]
class_4_360 = train_df[train_df['Class']=='4'].reset_index()['Name'][111]
# View the images with their augmented outputs
print('\033[1m' + 'Image for Swipe Left:' + '\033[0m')
imageview(class_0_120, '/datasets/Project_data/train')
imageview(class_0_360, '/datasets/Project_data/train')
print('\033[1m' + 'Image for Swipe Right:' + '\033[0m')
imageview(class_1_120, '/datasets/Project_data/train')
imageview(class_1_360, '/datasets/Project_data/train')
print('\033[1m' + 'Image for Stop:' + '\033[0m')
imageview(class_2_120, '/datasets/Project_data/train')
imageview(class_2_360, '/datasets/Project_data/train')
print('\033[1m' + 'Image for Volume Down:' + '\033[0m')
imageview(class_3_120, '/datasets/Project_data/train')
imageview(class_3_360, '/datasets/Project_data/train')
print('\033[1m' + 'Image for Volume Up:' + '\033[0m')
imageview(class_4_120, '/datasets/Project_data/train')
imageview(class_4_360, '/datasets/Project_data/train')
Image for Swipe Left:
original: (120, 160, 3)
resized: (120, 120, 3)
blurred: (120, 120, 3)
edge: (120, 120, 3)
normalized: (120, 120, 3)
original: (360, 360, 3) resized: (120, 120, 3) blurred: (120, 120, 3) edge: (120, 120, 3) normalized: (120, 120, 3)
Image for Swipe Right:
original: (120, 160, 3)
resized: (120, 120, 3)
blurred: (120, 120, 3)
edge: (120, 120, 3)
normalized: (120, 120, 3)
original: (360, 360, 3) resized: (120, 120, 3) blurred: (120, 120, 3) edge: (120, 120, 3) normalized: (120, 120, 3)
Image for Stop:
original: (120, 160, 3)
resized: (120, 120, 3)
blurred: (120, 120, 3)
edge: (120, 120, 3)
normalized: (120, 120, 3)
original: (360, 360, 3) resized: (120, 120, 3) blurred: (120, 120, 3) edge: (120, 120, 3) normalized: (120, 120, 3)
Image for Volume Down:
original: (120, 160, 3)
resized: (120, 120, 3)
blurred: (120, 120, 3)
edge: (120, 120, 3)
normalized: (120, 120, 3)
original: (360, 360, 3) resized: (120, 120, 3) blurred: (120, 120, 3) edge: (120, 120, 3) normalized: (120, 120, 3)
Image for Volume Up:
original: (120, 160, 3)
resized: (120, 120, 3)
blurred: (120, 120, 3)
edge: (120, 120, 3)
normalized: (120, 120, 3)
original: (360, 360, 3) resized: (120, 120, 3) blurred: (120, 120, 3) edge: (120, 120, 3) normalized: (120, 120, 3)
This is one of the most important part of the code. The overall structure of the generator has been given. In the generator, you are going to preprocess the images as you have images of 2 different dimensions as well as create a batch of video frames. You have to experiment with img_idx, y,z and normalization such that you get high accuracy.
# The generator function
def generator(source_path, folder_list, batch_size, num_frames, frame_height, frame_width, augment, normalize):
    """Endlessly yield (batch_data, batch_labels) batches of gesture videos.

    Each yielded batch_data has shape (b, num_frames, frame_height, frame_width, 3)
    and batch_labels is the one-hot (b, 5) label matrix. When augment is True a
    Gaussian-blurred copy of every video is appended, doubling the batch.

    Parameters
    ----------
    source_path : str
        Directory containing one sub-folder of 30 frames per video.
    folder_list : sequence of str
        CSV lines of the form "folder;gesture;class".
    batch_size : int
        Videos per full batch (a smaller remainder batch is yielded at the end
        of every pass over folder_list).
    num_frames : int
        Frames sampled per video, evenly spaced over the 30 available.
    frame_height, frame_width : int
        Target frame size after resizing.
    augment : bool
        Append blurred copies of each batch when True.
    normalize : bool
        Scale pixel values into [0, 1] when True.
    """
    print('Source path = ', source_path, '; batch size =', batch_size)
    # Evenly-spaced frame indices over the 30 frames of every video.
    img_idx = np.round(np.linspace(0, 29, num_frames)).astype(int)
    print("Image Indexes: ", img_idx)

    def _load_batch(t, start, size):
        # Build one batch of `size` videos starting at row `start` of the
        # shuffled folder list `t`. Shared by the full-batch and remainder
        # paths (the original duplicated ~40 lines between them).
        batch_data = np.zeros((size, num_frames, frame_height, frame_width, 3))
        batch_labels = np.zeros((size, 5))
        batch_data_blur = np.zeros((size, num_frames, frame_height, frame_width, 3))
        for folder in range(size):
            record = t[start + folder].strip().split(';')
            # Sort so frame order is deterministic — os.listdir order is arbitrary.
            imgs = sorted(os.listdir(source_path + '/' + record[0]))
            for idx, item in enumerate(img_idx):
                image = imread(source_path + '/' + record[0] + '/' + imgs[item]).astype(np.float32)
                # Source videos come in two shapes (120x160x3 and 360x360x3);
                # resize all to one size so Conv3D gets uniform inputs.
                # NOTE: cv2.resize takes dsize as (width, height) — the
                # original passed (height, width), which only worked because
                # the target happened to be square.
                image_resized = cv2.resize(image, (frame_width, frame_height),
                                           interpolation=cv2.INTER_AREA)
                if normalize:
                    image_resized = image_resized / 255.0
                batch_data[folder, idx] = image_resized
                if augment:
                    # Augmentation: Gaussian-blurred copy of every frame.
                    # Blur is linear, so blurring the normalized frame equals
                    # normalizing the blurred frame.
                    batch_data_blur[folder, idx] = cv2.GaussianBlur(image_resized, (3, 3), 3)
            batch_labels[folder, int(record[2])] = 1  # one-hot class label
        if augment:
            # Append the blurred videos, duplicating their labels.
            batch_data = np.concatenate([batch_data, batch_data_blur])
            batch_labels = np.concatenate([batch_labels, batch_labels])
        return batch_data, batch_labels

    while True:
        t = np.random.permutation(folder_list)
        num_batches = len(t) // batch_size
        for batch in range(num_batches):
            yield _load_batch(t, batch * batch_size, batch_size)
        # Remainder batch for the data points left after the full batches.
        remaining_data = len(t) % batch_size
        if remaining_data != 0:
            yield _load_batch(t, num_batches * batch_size, remaining_data)
Note here that a video is represented above in the generator as (number of images, height, width, number of channels). Take this into consideration while creating the model architecture.
# Test the Generator function on training data set
num_frames = 20
frame_height = 120
frame_width = 120
augment = False
normalize = True
train_generator = generator(train_path, train_doc, batch_size, num_frames, frame_height, frame_width, augment, normalize)
for i in range(int(len(train_doc)//batch_size + 1)):
batch_data_temp, batch_label_temp = next(train_generator)
print('Shape of each batch: ',batch_data_temp.shape)
print('Shape of each batch label: ',batch_label_temp.shape)
Source path = /datasets/Project_data/train ; batch size = 32 Image Indexes: [ 0 2 3 5 6 8 9 11 12 14 15 17 18 20 21 23 24 26 27 29] Shape of each batch: (32, 20, 120, 120, 3) Shape of each batch label: (32, 5) Shape of each batch: (32, 20, 120, 120, 3) Shape of each batch label: (32, 5) Shape of each batch: (32, 20, 120, 120, 3) Shape of each batch label: (32, 5) Shape of each batch: (32, 20, 120, 120, 3) Shape of each batch label: (32, 5) Shape of each batch: (32, 20, 120, 120, 3) Shape of each batch label: (32, 5) Shape of each batch: (32, 20, 120, 120, 3) Shape of each batch label: (32, 5) Shape of each batch: (32, 20, 120, 120, 3) Shape of each batch label: (32, 5) Shape of each batch: (32, 20, 120, 120, 3) Shape of each batch label: (32, 5) Shape of each batch: (32, 20, 120, 120, 3) Shape of each batch label: (32, 5) Shape of each batch: (32, 20, 120, 120, 3) Shape of each batch label: (32, 5) Shape of each batch: (32, 20, 120, 120, 3) Shape of each batch label: (32, 5) Shape of each batch: (32, 20, 120, 120, 3) Shape of each batch label: (32, 5) Shape of each batch: (32, 20, 120, 120, 3) Shape of each batch label: (32, 5) Shape of each batch: (32, 20, 120, 120, 3) Shape of each batch label: (32, 5) Shape of each batch: (32, 20, 120, 120, 3) Shape of each batch label: (32, 5) Shape of each batch: (32, 20, 120, 120, 3) Shape of each batch label: (32, 5) Shape of each batch: (32, 20, 120, 120, 3) Shape of each batch label: (32, 5) Shape of each batch: (32, 20, 120, 120, 3) Shape of each batch label: (32, 5) Shape of each batch: (32, 20, 120, 120, 3) Shape of each batch label: (32, 5) Shape of each batch: (32, 20, 120, 120, 3) Shape of each batch label: (32, 5) Shape of each batch: (23, 20, 120, 120, 3) Shape of each batch label: (23, 5)
# Test the Generator function on validation data set
num_frames = 20
frame_height = 120
frame_width = 120
augment = False
normalize = True
val_generator = generator(val_path, val_doc, batch_size, num_frames, frame_height, frame_width, augment, normalize)
if (len(val_doc)%batch_size) == 0:
for i in range(int(len(val_doc)/batch_size)):
batch_data_temp, batch_label_temp = next(val_generator)
print('Shape of each batch: ',batch_data_temp.shape)
print('Shape of each batch label: ',batch_label_temp.shape)
else:
for i in range(int(len(val_doc)//batch_size + 1)):
batch_data_temp, batch_label_temp = next(val_generator)
print('Shape of each batch: ',batch_data_temp.shape)
print('Shape of each batch label: ',batch_label_temp.shape)
Source path = /datasets/Project_data/val ; batch size = 32 Image Indexes: [ 0 2 3 5 6 8 9 11 12 14 15 17 18 20 21 23 24 26 27 29] Shape of each batch: (32, 20, 120, 120, 3) Shape of each batch label: (32, 5) Shape of each batch: (32, 20, 120, 120, 3) Shape of each batch label: (32, 5) Shape of each batch: (32, 20, 120, 120, 3) Shape of each batch label: (32, 5) Shape of each batch: (4, 20, 120, 120, 3) Shape of each batch label: (4, 5)
curr_dt_time = datetime.datetime.now()
num_train_sequences = len(train_doc)
print('# training sequences =', num_train_sequences)
num_val_sequences = len(val_doc)
print('# validation sequences =', num_val_sequences)
num_epochs = 30 # choose the number of epochs
print ('# epochs =', num_epochs)
# training sequences = 663 # validation sequences = 100 # epochs = 30
Here you make the model using different functionalities that Keras provides. Remember to use Conv3D and MaxPooling3D and not Conv2D and Maxpooling2D for a 3D convolution model. You would want to use TimeDistributed while building a Conv2D + RNN model. Also remember that the last layer is the softmax. Design the network in such a way that the model is able to give good accuracy on the least number of parameters so that it can fit in the memory of the webcam.
Let us write some custom functions first which need to be repeatedly called
# Function to fit and train the Model
def trainer(model, model_name, num_epochs, size_batch, num_frames, frame_height, frame_width, augment, normalize):
    """Fit `model` on the train/val generators and return the History object.

    Checkpoints (best val_categorical_accuracy only), ReduceLROnPlateau and
    EarlyStopping are attached; models are saved in a timestamped directory
    named after `model_name`. Augmentation is never applied to validation data.
    """
    def _steps(num_sequences):
        # Generator steps needed to cover every sequence once per epoch.
        return int(np.ceil(num_sequences / size_batch))

    # Generator over the training data (augmentation as requested).
    train_gen = generator(train_path, train_doc, size_batch, num_frames,
                          frame_height, frame_width, augment, normalize)
    train_steps_per_epoch = _steps(len(train_doc))
    # Generator over the validation data — augmentation forced off.
    val_gen = generator(val_path, val_doc, size_batch, num_frames,
                        frame_height, frame_width, False, normalize)
    val_steps_per_epoch = _steps(len(val_doc))

    # Callback to save models, in a directory stamped with the current time.
    curr_dt_time = datetime.datetime.now()
    mod_name = model_name + '_' + str(curr_dt_time).replace(' ', '').replace(':', '_') + '/'
    if not os.path.exists(mod_name):
        os.mkdir(mod_name)
    filepath = mod_name + 'model-{epoch:05d}-{loss:.5f}-{categorical_accuracy:.5f}-{val_loss:.5f}-{val_categorical_accuracy:.5f}.h5'
    # `period` is deprecated; the default save_freq='epoch' is equivalent to period=1.
    checkpoint = ModelCheckpoint(filepath, monitor='val_categorical_accuracy', verbose=1,
                                 save_best_only=True, save_weights_only=False, mode='auto')
    # Reduce the learning rate upon reaching a val_loss plateau.
    LR = ReduceLROnPlateau(monitor='val_loss', factor=0.2, patience=4, verbose=1)
    # Stop early if val_loss has not improved for 10 epochs.
    earlystop = EarlyStopping(monitor='val_loss', mode='auto', patience=10, verbose=1)
    callbacks_list = [checkpoint, LR, earlystop]

    # Model.fit accepts generators directly in TF 2.x; fit_generator is
    # deprecated (the warning was visible in the original run log).
    history_obj = model.fit(train_gen, steps_per_epoch=train_steps_per_epoch,
                            epochs=num_epochs, verbose=1,
                            callbacks=callbacks_list, workers=1,
                            initial_epoch=0, class_weight=None,
                            validation_data=val_gen,
                            validation_steps=val_steps_per_epoch,
                            shuffle=False)
    return history_obj
# Custom function for plotting accuracies and losses of the model
def modelplot(history):
    """Plot the training curves: loss on the left, accuracy on the right."""
    plt.figure(figsize=(16, 5))
    # (subplot position, panel title, train-history key, val-history key, legend labels)
    panels = (
        (121, 'Loss', 'loss', 'val_loss',
         ['train_loss', 'val_loss']),
        (122, 'Accuracy', 'categorical_accuracy', 'val_categorical_accuracy',
         ['train_categorical_accuracy', 'val_categorical_accuracy']),
    )
    for position, title, train_key, val_key, labels in panels:
        plt.subplot(position)
        plt.title(title)
        plt.plot(history.history[train_key])
        plt.plot(history.history[val_key])
        plt.legend(labels)
        plt.xlabel('Epochs')
        plt.ylabel(title)
    plt.show()
# Test Conv3D Model for test run
def model_Conv3D_Test(num_frames, frame_height, frame_width):
    """Baseline Conv3D network: four conv blocks then a dense softmax head.

    Input shape is (num_frames, frame_height, frame_width, 3); output is a
    5-way softmax over the gesture classes.
    """
    model = Sequential()
    # One (filters, kernel, pool) spec per conv block; each block is
    # Conv3D -> ReLU -> BatchNorm -> MaxPool3D.
    conv_blocks = (
        (16, (3, 3, 3), (3, 3, 3)),
        (32, (2, 2, 2), (2, 2, 2)),
        (64, (2, 2, 2), (2, 2, 2)),
        (128, (2, 2, 2), (2, 2, 2)),
    )
    for block_num, (filters, kernel, pool) in enumerate(conv_blocks):
        if block_num == 0:
            # First layer declares the input shape.
            model.add(Conv3D(filters, kernel,
                             input_shape=(num_frames, frame_height, frame_width, 3),
                             padding='same'))
        else:
            model.add(Conv3D(filters, kernel, padding='same'))
        model.add(Activation('relu'))
        model.add(BatchNormalization())
        model.add(MaxPooling3D(pool_size=pool))
    # Dense classifier head.
    model.add(Flatten())
    model.add(Dense(256))
    model.add(Activation('relu'))
    model.add(BatchNormalization())
    model.add(Dropout(0.25))
    # Final softmax layer for the 5 classes
    model.add(Dense(5))
    model.add(Activation('softmax'))
    return model
Time taken and the memory needed to train the model is greatly affected by the batch size, image size and number of frames. We do as below:
# Model with image size 100x100
num_frames = 24
frame_height = 100
frame_width = 100
batch_size = 32
num_epochs = 2
augment = False # Add Gaussian Blurring taken care in Generator function
normalize = True # Dividing image channel-wise by 255 is taken care of in the generator function
model = model_Conv3D_Test(num_frames, frame_height, frame_width)
optimiser = optimizers.Adam()
model.compile(optimizer = optimiser, loss = 'categorical_crossentropy', metrics = ['categorical_accuracy'])
print(model.summary())
# Run the model and check accuracy
trainer(model, 'Conv3D_Test', num_epochs, batch_size, num_frames, frame_height, frame_width, augment, normalize)
Model: "sequential" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv3d (Conv3D) (None, 24, 100, 100, 16) 1312 _________________________________________________________________ activation (Activation) (None, 24, 100, 100, 16) 0 _________________________________________________________________ batch_normalization (BatchNo (None, 24, 100, 100, 16) 64 _________________________________________________________________ max_pooling3d (MaxPooling3D) (None, 8, 33, 33, 16) 0 _________________________________________________________________ conv3d_1 (Conv3D) (None, 8, 33, 33, 32) 4128 _________________________________________________________________ activation_1 (Activation) (None, 8, 33, 33, 32) 0 _________________________________________________________________ batch_normalization_1 (Batch (None, 8, 33, 33, 32) 128 _________________________________________________________________ max_pooling3d_1 (MaxPooling3 (None, 4, 16, 16, 32) 0 _________________________________________________________________ conv3d_2 (Conv3D) (None, 4, 16, 16, 64) 16448 _________________________________________________________________ activation_2 (Activation) (None, 4, 16, 16, 64) 0 _________________________________________________________________ batch_normalization_2 (Batch (None, 4, 16, 16, 64) 256 _________________________________________________________________ max_pooling3d_2 (MaxPooling3 (None, 2, 8, 8, 64) 0 _________________________________________________________________ conv3d_3 (Conv3D) (None, 2, 8, 8, 128) 65664 _________________________________________________________________ activation_3 (Activation) (None, 2, 8, 8, 128) 0 _________________________________________________________________ batch_normalization_3 (Batch (None, 2, 8, 8, 128) 512 _________________________________________________________________ max_pooling3d_3 (MaxPooling3 (None, 1, 4, 4, 128) 0 
_________________________________________________________________ flatten (Flatten) (None, 2048) 0 _________________________________________________________________ dense (Dense) (None, 256) 524544 _________________________________________________________________ activation_4 (Activation) (None, 256) 0 _________________________________________________________________ batch_normalization_4 (Batch (None, 256) 1024 _________________________________________________________________ dropout (Dropout) (None, 256) 0 _________________________________________________________________ dense_1 (Dense) (None, 5) 1285 _________________________________________________________________ activation_5 (Activation) (None, 5) 0 ================================================================= Total params: 615,365 Trainable params: 614,373 Non-trainable params: 992 _________________________________________________________________ None WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen. WARNING:tensorflow:From <ipython-input-30-22dc052cd7be>:46: Model.fit_generator (from tensorflow.python.keras.engine.training) is deprecated and will be removed in a future version. Instructions for updating: Please use Model.fit, which supports generators. 
Source path = /datasets/Project_data/train ; batch size = 32 Image Indexes: [ 0 1 3 4 5 6 8 9 10 11 13 14 15 16 18 19 20 21 23 24 25 26 28 29] Epoch 1/2 21/21 [==============================] - ETA: 0s - loss: 1.2285 - categorical_accuracy: 0.5686Source path = /datasets/Project_data/val ; batch size = 32 Image Indexes: [ 0 1 3 4 5 6 8 9 10 11 13 14 15 16 18 19 20 21 23 24 25 26 28 29] Epoch 00001: val_categorical_accuracy improved from -inf to 0.16000, saving model to Conv3D_Test_2021-05-2705_06_28.754954/model-00001-1.22848-0.56863-2.36517-0.16000.h5 21/21 [==============================] - 58s 3s/step - loss: 1.2285 - categorical_accuracy: 0.5686 - val_loss: 2.3652 - val_categorical_accuracy: 0.1600 Epoch 2/2 21/21 [==============================] - ETA: 0s - loss: 0.4375 - categorical_accuracy: 0.8311 Epoch 00002: val_categorical_accuracy did not improve from 0.16000 21/21 [==============================] - 60s 3s/step - loss: 0.4375 - categorical_accuracy: 0.8311 - val_loss: 3.7260 - val_categorical_accuracy: 0.1600
<tensorflow.python.keras.callbacks.History at 0x7f0f40850e10>
# Model with image size 120x120
num_frames = 24
frame_height = 120
frame_width = 120
size_batch = 32
num_epochs = 2
augment = False
normalize = True
model = model_Conv3D_Test(num_frames, frame_height, frame_width)
optimiser = optimizers.Adam()
model.compile(optimizer = optimiser, loss = 'categorical_crossentropy', metrics = ['categorical_accuracy'])
print(model.summary())
# Run the model and check accuracy
trainer(model, 'Conv3D_Test', num_epochs, size_batch, num_frames, frame_height, frame_width, augment, normalize)
Model: "sequential_1" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv3d_4 (Conv3D) (None, 24, 120, 120, 16) 1312 _________________________________________________________________ activation_6 (Activation) (None, 24, 120, 120, 16) 0 _________________________________________________________________ batch_normalization_5 (Batch (None, 24, 120, 120, 16) 64 _________________________________________________________________ max_pooling3d_4 (MaxPooling3 (None, 8, 40, 40, 16) 0 _________________________________________________________________ conv3d_5 (Conv3D) (None, 8, 40, 40, 32) 4128 _________________________________________________________________ activation_7 (Activation) (None, 8, 40, 40, 32) 0 _________________________________________________________________ batch_normalization_6 (Batch (None, 8, 40, 40, 32) 128 _________________________________________________________________ max_pooling3d_5 (MaxPooling3 (None, 4, 20, 20, 32) 0 _________________________________________________________________ conv3d_6 (Conv3D) (None, 4, 20, 20, 64) 16448 _________________________________________________________________ activation_8 (Activation) (None, 4, 20, 20, 64) 0 _________________________________________________________________ batch_normalization_7 (Batch (None, 4, 20, 20, 64) 256 _________________________________________________________________ max_pooling3d_6 (MaxPooling3 (None, 2, 10, 10, 64) 0 _________________________________________________________________ conv3d_7 (Conv3D) (None, 2, 10, 10, 128) 65664 _________________________________________________________________ activation_9 (Activation) (None, 2, 10, 10, 128) 0 _________________________________________________________________ batch_normalization_8 (Batch (None, 2, 10, 10, 128) 512 _________________________________________________________________ max_pooling3d_7 (MaxPooling3 (None, 1, 5, 5, 128) 
0 _________________________________________________________________ flatten_1 (Flatten) (None, 3200) 0 _________________________________________________________________ dense_2 (Dense) (None, 256) 819456 _________________________________________________________________ activation_10 (Activation) (None, 256) 0 _________________________________________________________________ batch_normalization_9 (Batch (None, 256) 1024 _________________________________________________________________ dropout_1 (Dropout) (None, 256) 0 _________________________________________________________________ dense_3 (Dense) (None, 5) 1285 _________________________________________________________________ activation_11 (Activation) (None, 5) 0 ================================================================= Total params: 910,277 Trainable params: 909,285 Non-trainable params: 992 _________________________________________________________________ None WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen. 
Source path = /datasets/Project_data/train ; batch size = 32 Image Indexes: [ 0 1 3 4 5 6 8 9 10 11 13 14 15 16 18 19 20 21 23 24 25 26 28 29] Epoch 1/2 21/21 [==============================] - ETA: 0s - loss: 1.1662 - categorical_accuracy: 0.5581Source path = /datasets/Project_data/val ; batch size = 32 Image Indexes: [ 0 1 3 4 5 6 8 9 10 11 13 14 15 16 18 19 20 21 23 24 25 26 28 29] Epoch 00001: val_categorical_accuracy improved from -inf to 0.21000, saving model to Conv3D_Test_2021-05-2705_08_33.803060/model-00001-1.16617-0.55807-2.41060-0.21000.h5 21/21 [==============================] - 57s 3s/step - loss: 1.1662 - categorical_accuracy: 0.5581 - val_loss: 2.4106 - val_categorical_accuracy: 0.2100 Epoch 2/2 21/21 [==============================] - ETA: 0s - loss: 0.3539 - categorical_accuracy: 0.8733 Epoch 00002: val_categorical_accuracy did not improve from 0.21000 21/21 [==============================] - 57s 3s/step - loss: 0.3539 - categorical_accuracy: 0.8733 - val_loss: 5.6997 - val_categorical_accuracy: 0.1900
<tensorflow.python.keras.callbacks.History at 0x7f0f40b3d208>
# # Model with image size 160x160
# num_frames = 8
# frame_height = 160
# frame_width = 160
# size_batch = 24
# num_epochs = 2
# augment = False
# normalize = True # Dividing image channel-wise by 255 is taken care of in the generator function
# model = model_Conv3D_Test(num_frames, frame_height, frame_width)
# optimiser = optimizers.Adam()
# model.compile(optimizer = optimiser, loss = 'categorical_crossentropy', metrics = ['categorical_accuracy'])
# print(model.summary())
# # Run the model and check accuracy
# trainer(model, 'Conv3D_Test', num_epochs, size_batch, num_frames, frame_height, frame_width, augment, normalize)
120x120 seemed the better size, as it takes less time while still giving good accuracy; 100x100 also works well in some models. With these parameters decided, let us start building the model and settle the remaining choices such as filter size, number of layers, etc.
def model_Conv3D_1(num_frames, frame_height, frame_width):
    """Build a 3D-CNN for 5-way gesture classification.

    Four Conv3D stages with (3,3,3) kernels (16 -> 32 -> 64 -> 128 filters),
    each followed by ReLU, BatchNorm and (2,2,2) max-pooling, then a
    128/256-unit dense head with dropout and a 5-way softmax output.

    Args:
        num_frames: frames sampled per video (temporal depth of the input).
        frame_height: input frame height in pixels.
        frame_width: input frame width in pixels.

    Returns:
        An uncompiled keras Sequential model.
    """
    net = Sequential()
    # Feature extractor: filters double at every stage while each spatial /
    # temporal dimension is halved by the pooling layer.
    for stage, n_filters in enumerate((16, 32, 64, 128)):
        if stage == 0:
            # Only the first layer needs the input shape (frames, H, W, RGB).
            net.add(Conv3D(n_filters, (3, 3, 3),
                           input_shape=(num_frames, frame_height, frame_width, 3),
                           padding='same'))
        else:
            net.add(Conv3D(n_filters, (3, 3, 3), padding='same'))
        net.add(Activation('relu'))
        net.add(BatchNormalization())
        net.add(MaxPooling3D(pool_size=(2, 2, 2)))
    # Classifier head.
    net.add(Flatten())
    net.add(Dense(128))
    net.add(Activation('relu'))
    net.add(BatchNormalization())
    net.add(Dropout(0.5))
    net.add(Dense(256))
    net.add(Activation('relu'))
    net.add(Dropout(0.25))
    # Final softmax layer for the 5 classes
    net.add(Dense(5))
    net.add(Activation('softmax'))
    return net
# Model 1 - Conv3D Model with filter size (3,3,3)
# Experiment hyperparameters: 120x120 frames, 16 frames per video, batch of 32.
frame_height = 120
frame_width = 120
num_frames = 16
size_batch = 32
num_epochs = 20
augment = False  # no data augmentation for this run
# Renamed from `normalize` to `normalization` for consistency with the
# other experiment cells (which all pass a variable named `normalization`).
normalization = True
model = model_Conv3D_1(num_frames, frame_height, frame_width)
optimiser = optimizers.Adam()
model.compile(optimizer = optimiser, loss = 'categorical_crossentropy', metrics = ['categorical_accuracy'])
print(model.summary())
# Run the model and check accuracy
model_history = trainer(model, 'Conv3D_1', num_epochs, size_batch, num_frames, frame_height, frame_width, augment, normalization)
modelplot(model_history)
Model: "sequential_2" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv3d_8 (Conv3D) (None, 16, 120, 120, 16) 1312 _________________________________________________________________ activation_12 (Activation) (None, 16, 120, 120, 16) 0 _________________________________________________________________ batch_normalization_10 (Batc (None, 16, 120, 120, 16) 64 _________________________________________________________________ max_pooling3d_8 (MaxPooling3 (None, 8, 60, 60, 16) 0 _________________________________________________________________ conv3d_9 (Conv3D) (None, 8, 60, 60, 32) 13856 _________________________________________________________________ activation_13 (Activation) (None, 8, 60, 60, 32) 0 _________________________________________________________________ batch_normalization_11 (Batc (None, 8, 60, 60, 32) 128 _________________________________________________________________ max_pooling3d_9 (MaxPooling3 (None, 4, 30, 30, 32) 0 _________________________________________________________________ conv3d_10 (Conv3D) (None, 4, 30, 30, 64) 55360 _________________________________________________________________ activation_14 (Activation) (None, 4, 30, 30, 64) 0 _________________________________________________________________ batch_normalization_12 (Batc (None, 4, 30, 30, 64) 256 _________________________________________________________________ max_pooling3d_10 (MaxPooling (None, 2, 15, 15, 64) 0 _________________________________________________________________ conv3d_11 (Conv3D) (None, 2, 15, 15, 128) 221312 _________________________________________________________________ activation_15 (Activation) (None, 2, 15, 15, 128) 0 _________________________________________________________________ batch_normalization_13 (Batc (None, 2, 15, 15, 128) 512 _________________________________________________________________ max_pooling3d_11 (MaxPooling (None, 1, 7, 
7, 128) 0 _________________________________________________________________ flatten_2 (Flatten) (None, 6272) 0 _________________________________________________________________ dense_4 (Dense) (None, 128) 802944 _________________________________________________________________ activation_16 (Activation) (None, 128) 0 _________________________________________________________________ batch_normalization_14 (Batc (None, 128) 512 _________________________________________________________________ dropout_2 (Dropout) (None, 128) 0 _________________________________________________________________ dense_5 (Dense) (None, 256) 33024 _________________________________________________________________ activation_17 (Activation) (None, 256) 0 _________________________________________________________________ dropout_3 (Dropout) (None, 256) 0 _________________________________________________________________ dense_6 (Dense) (None, 5) 1285 _________________________________________________________________ activation_18 (Activation) (None, 5) 0 ================================================================= Total params: 1,130,565 Trainable params: 1,129,829 Non-trainable params: 736 _________________________________________________________________ None WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen. 
Source path = /datasets/Project_data/train ; batch size = 32 Image Indexes: [ 0 2 4 6 8 10 12 14 15 17 19 21 23 25 27 29] Epoch 1/20 21/21 [==============================] - ETA: 0s - loss: 1.4067 - categorical_accuracy: 0.4329Source path = /datasets/Project_data/val ; batch size = 32 Image Indexes: [ 0 2 4 6 8 10 12 14 15 17 19 21 23 25 27 29] Epoch 00001: val_categorical_accuracy improved from -inf to 0.24000, saving model to Conv3D_1_2021-05-2705_10_32.110435/model-00001-1.40672-0.43288-3.23192-0.24000.h5 21/21 [==============================] - 38s 2s/step - loss: 1.4067 - categorical_accuracy: 0.4329 - val_loss: 3.2319 - val_categorical_accuracy: 0.2400 Epoch 2/20 21/21 [==============================] - ETA: 0s - loss: 0.9610 - categorical_accuracy: 0.6154 Epoch 00002: val_categorical_accuracy did not improve from 0.24000 21/21 [==============================] - 38s 2s/step - loss: 0.9610 - categorical_accuracy: 0.6154 - val_loss: 2.5691 - val_categorical_accuracy: 0.1900 Epoch 3/20 21/21 [==============================] - ETA: 0s - loss: 0.6185 - categorical_accuracy: 0.7632 Epoch 00003: val_categorical_accuracy did not improve from 0.24000 21/21 [==============================] - 39s 2s/step - loss: 0.6185 - categorical_accuracy: 0.7632 - val_loss: 4.3601 - val_categorical_accuracy: 0.2400 Epoch 4/20 21/21 [==============================] - ETA: 0s - loss: 0.4721 - categorical_accuracy: 0.8175 Epoch 00004: val_categorical_accuracy did not improve from 0.24000 21/21 [==============================] - 36s 2s/step - loss: 0.4721 - categorical_accuracy: 0.8175 - val_loss: 5.1659 - val_categorical_accuracy: 0.2000 Epoch 5/20 21/21 [==============================] - ETA: 0s - loss: 0.3327 - categorical_accuracy: 0.8854 Epoch 00005: val_categorical_accuracy did not improve from 0.24000 21/21 [==============================] - 38s 2s/step - loss: 0.3327 - categorical_accuracy: 0.8854 - val_loss: 6.4046 - val_categorical_accuracy: 0.2100 Epoch 6/20 21/21 
[==============================] - ETA: 0s - loss: 0.2388 - categorical_accuracy: 0.9246 Epoch 00006: val_categorical_accuracy did not improve from 0.24000 Epoch 00006: ReduceLROnPlateau reducing learning rate to 0.00020000000949949026. 21/21 [==============================] - 38s 2s/step - loss: 0.2388 - categorical_accuracy: 0.9246 - val_loss: 7.2221 - val_categorical_accuracy: 0.2400 Epoch 7/20 21/21 [==============================] - ETA: 0s - loss: 0.1630 - categorical_accuracy: 0.9472 Epoch 00007: val_categorical_accuracy did not improve from 0.24000 21/21 [==============================] - 39s 2s/step - loss: 0.1630 - categorical_accuracy: 0.9472 - val_loss: 7.1195 - val_categorical_accuracy: 0.2000 Epoch 8/20 21/21 [==============================] - ETA: 0s - loss: 0.1075 - categorical_accuracy: 0.9713 Epoch 00008: val_categorical_accuracy did not improve from 0.24000 21/21 [==============================] - 36s 2s/step - loss: 0.1075 - categorical_accuracy: 0.9713 - val_loss: 7.1437 - val_categorical_accuracy: 0.2100 Epoch 9/20 21/21 [==============================] - ETA: 0s - loss: 0.1134 - categorical_accuracy: 0.9729 Epoch 00009: val_categorical_accuracy did not improve from 0.24000 21/21 [==============================] - 38s 2s/step - loss: 0.1134 - categorical_accuracy: 0.9729 - val_loss: 7.3271 - val_categorical_accuracy: 0.2100 Epoch 10/20 21/21 [==============================] - ETA: 0s - loss: 0.0808 - categorical_accuracy: 0.9819 Epoch 00010: val_categorical_accuracy did not improve from 0.24000 Epoch 00010: ReduceLROnPlateau reducing learning rate to 4.0000001899898055e-05. 
21/21 [==============================] - 38s 2s/step - loss: 0.0808 - categorical_accuracy: 0.9819 - val_loss: 7.2648 - val_categorical_accuracy: 0.2100 Epoch 11/20 21/21 [==============================] - ETA: 0s - loss: 0.0742 - categorical_accuracy: 0.9910 Epoch 00011: val_categorical_accuracy did not improve from 0.24000 21/21 [==============================] - 39s 2s/step - loss: 0.0742 - categorical_accuracy: 0.9910 - val_loss: 7.2001 - val_categorical_accuracy: 0.2200 Epoch 12/20 21/21 [==============================] - ETA: 0s - loss: 0.0575 - categorical_accuracy: 0.9940 Epoch 00012: val_categorical_accuracy did not improve from 0.24000 21/21 [==============================] - 37s 2s/step - loss: 0.0575 - categorical_accuracy: 0.9940 - val_loss: 7.1138 - val_categorical_accuracy: 0.2100 Epoch 00012: early stopping
# Model 2 - Conv3D Model with filter size (2,2,2)
def model_Conv3D_2(num_frames, frame_height, frame_width):
    """Build a 3D-CNN identical in layout to model 1 but with (2,2,2) kernels.

    Four Conv3D blocks (16/32/64/128 filters, each ReLU + BatchNorm +
    (2,2,2) max-pool) feed a 128/256 dense head with dropout, ending in a
    5-way softmax.

    Args:
        num_frames: frames sampled per video.
        frame_height: input frame height in pixels.
        frame_width: input frame width in pixels.

    Returns:
        An uncompiled keras Sequential model.
    """
    kernel = (2, 2, 2)
    pool = (2, 2, 2)
    stack = [
        # Stage 1 — carries the input spec (frames, H, W, 3 colour channels).
        Conv3D(16, kernel,
               input_shape=(num_frames, frame_height, frame_width, 3),
               padding='same'),
        Activation('relu'), BatchNormalization(), MaxPooling3D(pool_size=pool),
        # Stage 2
        Conv3D(32, kernel, padding='same'),
        Activation('relu'), BatchNormalization(), MaxPooling3D(pool_size=pool),
        # Stage 3
        Conv3D(64, kernel, padding='same'),
        Activation('relu'), BatchNormalization(), MaxPooling3D(pool_size=pool),
        # Stage 4
        Conv3D(128, kernel, padding='same'),
        Activation('relu'), BatchNormalization(), MaxPooling3D(pool_size=pool),
        # Dense classifier head.
        Flatten(),
        Dense(128), Activation('relu'), BatchNormalization(), Dropout(0.5),
        Dense(256), Activation('relu'), Dropout(0.25),
        # Final softmax layer for the 5 classes
        Dense(5), Activation('softmax'),
    ]
    model = Sequential()
    for layer in stack:
        model.add(layer)
    return model
# Model 2 - Conv3D Model with filter size (2,2,2)
# Experiment hyperparameters: same 120x120 / 16-frame setup as model 1,
# only the convolution kernel size differs (set inside model_Conv3D_2).
frame_height = 120
frame_width = 120
num_frames = 16
size_batch = 32
num_epochs = 20
augment = False  # no data augmentation for this run
normalization = True  # generator divides image channels by 255
model = model_Conv3D_2(num_frames, frame_height, frame_width)
optimiser = optimizers.Adam()
model.compile(optimizer = optimiser, loss = 'categorical_crossentropy', metrics = ['categorical_accuracy'])
print(model.summary())
# Run the model and check accuracy
model_history = trainer(model, 'Conv3D_2', num_epochs, size_batch, num_frames, frame_height, frame_width, augment, normalization)
modelplot(model_history)
Model: "sequential_3" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv3d_12 (Conv3D) (None, 16, 120, 120, 16) 400 _________________________________________________________________ activation_19 (Activation) (None, 16, 120, 120, 16) 0 _________________________________________________________________ batch_normalization_15 (Batc (None, 16, 120, 120, 16) 64 _________________________________________________________________ max_pooling3d_12 (MaxPooling (None, 8, 60, 60, 16) 0 _________________________________________________________________ conv3d_13 (Conv3D) (None, 8, 60, 60, 32) 4128 _________________________________________________________________ activation_20 (Activation) (None, 8, 60, 60, 32) 0 _________________________________________________________________ batch_normalization_16 (Batc (None, 8, 60, 60, 32) 128 _________________________________________________________________ max_pooling3d_13 (MaxPooling (None, 4, 30, 30, 32) 0 _________________________________________________________________ conv3d_14 (Conv3D) (None, 4, 30, 30, 64) 16448 _________________________________________________________________ activation_21 (Activation) (None, 4, 30, 30, 64) 0 _________________________________________________________________ batch_normalization_17 (Batc (None, 4, 30, 30, 64) 256 _________________________________________________________________ max_pooling3d_14 (MaxPooling (None, 2, 15, 15, 64) 0 _________________________________________________________________ conv3d_15 (Conv3D) (None, 2, 15, 15, 128) 65664 _________________________________________________________________ activation_22 (Activation) (None, 2, 15, 15, 128) 0 _________________________________________________________________ batch_normalization_18 (Batc (None, 2, 15, 15, 128) 512 _________________________________________________________________ max_pooling3d_15 (MaxPooling (None, 1, 7, 
7, 128) 0 _________________________________________________________________ flatten_3 (Flatten) (None, 6272) 0 _________________________________________________________________ dense_7 (Dense) (None, 128) 802944 _________________________________________________________________ activation_23 (Activation) (None, 128) 0 _________________________________________________________________ batch_normalization_19 (Batc (None, 128) 512 _________________________________________________________________ dropout_4 (Dropout) (None, 128) 0 _________________________________________________________________ dense_8 (Dense) (None, 256) 33024 _________________________________________________________________ activation_24 (Activation) (None, 256) 0 _________________________________________________________________ dropout_5 (Dropout) (None, 256) 0 _________________________________________________________________ dense_9 (Dense) (None, 5) 1285 _________________________________________________________________ activation_25 (Activation) (None, 5) 0 ================================================================= Total params: 925,365 Trainable params: 924,629 Non-trainable params: 736 _________________________________________________________________ None WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen. 
Source path = /datasets/Project_data/train ; batch size = 32 Image Indexes: [ 0 2 4 6 8 10 12 14 15 17 19 21 23 25 27 29] Epoch 1/20 21/21 [==============================] - ETA: 0s - loss: 1.3835 - categorical_accuracy: 0.4495Source path = /datasets/Project_data/val ; batch size = 32 Image Indexes: [ 0 2 4 6 8 10 12 14 15 17 19 21 23 25 27 29] Epoch 00001: val_categorical_accuracy improved from -inf to 0.16000, saving model to Conv3D_2_2021-05-2705_18_12.542587/model-00001-1.38351-0.44947-1.97066-0.16000.h5 21/21 [==============================] - 38s 2s/step - loss: 1.3835 - categorical_accuracy: 0.4495 - val_loss: 1.9707 - val_categorical_accuracy: 0.1600 Epoch 2/20 21/21 [==============================] - ETA: 0s - loss: 0.8111 - categorical_accuracy: 0.6878 Epoch 00002: val_categorical_accuracy improved from 0.16000 to 0.17000, saving model to Conv3D_2_2021-05-2705_18_12.542587/model-00002-0.81114-0.68778-2.53969-0.17000.h5 21/21 [==============================] - 38s 2s/step - loss: 0.8111 - categorical_accuracy: 0.6878 - val_loss: 2.5397 - val_categorical_accuracy: 0.1700 Epoch 3/20 21/21 [==============================] - ETA: 0s - loss: 0.5523 - categorical_accuracy: 0.8009 Epoch 00003: val_categorical_accuracy did not improve from 0.17000 21/21 [==============================] - 39s 2s/step - loss: 0.5523 - categorical_accuracy: 0.8009 - val_loss: 3.2016 - val_categorical_accuracy: 0.1600 Epoch 4/20 21/21 [==============================] - ETA: 0s - loss: 0.3661 - categorical_accuracy: 0.8808 Epoch 00004: val_categorical_accuracy did not improve from 0.17000 21/21 [==============================] - 37s 2s/step - loss: 0.3661 - categorical_accuracy: 0.8808 - val_loss: 4.1255 - val_categorical_accuracy: 0.1500 Epoch 5/20 21/21 [==============================] - ETA: 0s - loss: 0.2545 - categorical_accuracy: 0.9095 Epoch 00005: val_categorical_accuracy did not improve from 0.17000 Epoch 00005: ReduceLROnPlateau reducing learning rate to 
0.00020000000949949026. 21/21 [==============================] - 38s 2s/step - loss: 0.2545 - categorical_accuracy: 0.9095 - val_loss: 5.1527 - val_categorical_accuracy: 0.1600 Epoch 6/20 21/21 [==============================] - ETA: 0s - loss: 0.1763 - categorical_accuracy: 0.9487 Epoch 00006: val_categorical_accuracy did not improve from 0.17000 21/21 [==============================] - 38s 2s/step - loss: 0.1763 - categorical_accuracy: 0.9487 - val_loss: 5.9809 - val_categorical_accuracy: 0.1600 Epoch 7/20 21/21 [==============================] - ETA: 0s - loss: 0.1192 - categorical_accuracy: 0.9759 Epoch 00007: val_categorical_accuracy did not improve from 0.17000 21/21 [==============================] - 38s 2s/step - loss: 0.1192 - categorical_accuracy: 0.9759 - val_loss: 6.5865 - val_categorical_accuracy: 0.1500 Epoch 8/20 21/21 [==============================] - ETA: 0s - loss: 0.0918 - categorical_accuracy: 0.9879 Epoch 00008: val_categorical_accuracy did not improve from 0.17000 21/21 [==============================] - 36s 2s/step - loss: 0.0918 - categorical_accuracy: 0.9879 - val_loss: 7.3387 - val_categorical_accuracy: 0.1500 Epoch 9/20 21/21 [==============================] - ETA: 0s - loss: 0.0871 - categorical_accuracy: 0.9759 Epoch 00009: val_categorical_accuracy did not improve from 0.17000 Epoch 00009: ReduceLROnPlateau reducing learning rate to 4.0000001899898055e-05. 
21/21 [==============================] - 38s 2s/step - loss: 0.0871 - categorical_accuracy: 0.9759 - val_loss: 7.5378 - val_categorical_accuracy: 0.1600 Epoch 10/20 21/21 [==============================] - ETA: 0s - loss: 0.0741 - categorical_accuracy: 0.9864 Epoch 00010: val_categorical_accuracy did not improve from 0.17000 21/21 [==============================] - 38s 2s/step - loss: 0.0741 - categorical_accuracy: 0.9864 - val_loss: 7.7873 - val_categorical_accuracy: 0.1400 Epoch 11/20 21/21 [==============================] - ETA: 0s - loss: 0.0801 - categorical_accuracy: 0.9819 Epoch 00011: val_categorical_accuracy did not improve from 0.17000 21/21 [==============================] - 38s 2s/step - loss: 0.0801 - categorical_accuracy: 0.9819 - val_loss: 7.2121 - val_categorical_accuracy: 0.1700 Epoch 00011: early stopping
# Model 3 - Conv3D Model with filter size (3,3,3)
def model_Conv3D_3(num_frames, frame_height, frame_width):
    """Build a 3D-CNN like model 1 but with a slimmer 64/128 dense head.

    Feature extractor: four Conv3D stages with (3,3,3) kernels
    (16 -> 32 -> 64 -> 128 filters), each ReLU + BatchNorm + (2,2,2)
    max-pool. Head: Dense(64) + BatchNorm + Dropout(0.5), Dense(128) +
    Dropout(0.25), then a 5-way softmax.

    Args:
        num_frames: frames sampled per video.
        frame_height: input frame height in pixels.
        frame_width: input frame width in pixels.

    Returns:
        An uncompiled keras Sequential model.
    """
    net = Sequential()
    first_stage = True
    for n_filters in (16, 32, 64, 128):
        conv_kwargs = {'padding': 'same'}
        if first_stage:
            # The first layer declares the input spec (frames, H, W, RGB).
            conv_kwargs['input_shape'] = (num_frames, frame_height, frame_width, 3)
            first_stage = False
        net.add(Conv3D(n_filters, (3, 3, 3), **conv_kwargs))
        net.add(Activation('relu'))
        net.add(BatchNormalization())
        net.add(MaxPooling3D(pool_size=(2, 2, 2)))
    # Slimmer classifier head than models 1/2 (64/128 units vs 128/256).
    net.add(Flatten())
    net.add(Dense(64))
    net.add(Activation('relu'))
    net.add(BatchNormalization())
    net.add(Dropout(0.5))
    net.add(Dense(128))
    net.add(Activation('relu'))
    net.add(Dropout(0.25))
    # Final softmax layer for the 5 classes
    net.add(Dense(5))
    net.add(Activation('softmax'))
    return net
# Model 3 - Conv3D Model with filter size (3,3,3)
# Experiment hyperparameters: same input geometry as models 1/2, but this
# run turns data augmentation ON.
frame_height = 120
frame_width = 120
num_frames = 16
size_batch = 32
num_epochs = 20
augment = True  # enable data augmentation in the generator
normalization = True  # generator divides image channels by 255
model = model_Conv3D_3(num_frames, frame_height, frame_width)
optimiser = optimizers.Adam()
model.compile(optimizer = optimiser, loss = 'categorical_crossentropy', metrics = ['categorical_accuracy'])
print(model.summary())
# Run the model and check accuracy
model_history = trainer(model, 'Conv3D_3_1', num_epochs, size_batch, num_frames, frame_height, frame_width, augment, normalization)
modelplot(model_history)
Model: "sequential_4" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv3d_16 (Conv3D) (None, 16, 120, 120, 16) 1312 _________________________________________________________________ activation_26 (Activation) (None, 16, 120, 120, 16) 0 _________________________________________________________________ batch_normalization_20 (Batc (None, 16, 120, 120, 16) 64 _________________________________________________________________ max_pooling3d_16 (MaxPooling (None, 8, 60, 60, 16) 0 _________________________________________________________________ conv3d_17 (Conv3D) (None, 8, 60, 60, 32) 13856 _________________________________________________________________ activation_27 (Activation) (None, 8, 60, 60, 32) 0 _________________________________________________________________ batch_normalization_21 (Batc (None, 8, 60, 60, 32) 128 _________________________________________________________________ max_pooling3d_17 (MaxPooling (None, 4, 30, 30, 32) 0 _________________________________________________________________ conv3d_18 (Conv3D) (None, 4, 30, 30, 64) 55360 _________________________________________________________________ activation_28 (Activation) (None, 4, 30, 30, 64) 0 _________________________________________________________________ batch_normalization_22 (Batc (None, 4, 30, 30, 64) 256 _________________________________________________________________ max_pooling3d_18 (MaxPooling (None, 2, 15, 15, 64) 0 _________________________________________________________________ conv3d_19 (Conv3D) (None, 2, 15, 15, 128) 221312 _________________________________________________________________ activation_29 (Activation) (None, 2, 15, 15, 128) 0 _________________________________________________________________ batch_normalization_23 (Batc (None, 2, 15, 15, 128) 512 _________________________________________________________________ max_pooling3d_19 (MaxPooling (None, 1, 
7, 7, 128) 0 _________________________________________________________________ flatten_4 (Flatten) (None, 6272) 0 _________________________________________________________________ dense_10 (Dense) (None, 64) 401472 _________________________________________________________________ activation_30 (Activation) (None, 64) 0 _________________________________________________________________ batch_normalization_24 (Batc (None, 64) 256 _________________________________________________________________ dropout_6 (Dropout) (None, 64) 0 _________________________________________________________________ dense_11 (Dense) (None, 128) 8320 _________________________________________________________________ activation_31 (Activation) (None, 128) 0 _________________________________________________________________ dropout_7 (Dropout) (None, 128) 0 _________________________________________________________________ dense_12 (Dense) (None, 5) 645 _________________________________________________________________ activation_32 (Activation) (None, 5) 0 ================================================================= Total params: 703,493 Trainable params: 702,885 Non-trainable params: 608 _________________________________________________________________ None WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen. 
Source path = /datasets/Project_data/train ; batch size = 32 Image Indexes: [ 0 2 4 6 8 10 12 14 15 17 19 21 23 25 27 29] Epoch 1/20 21/21 [==============================] - ETA: 0s - loss: 1.4446 - categorical_accuracy: 0.4020Source path = /datasets/Project_data/val ; batch size = 32 Image Indexes: [ 0 2 4 6 8 10 12 14 15 17 19 21 23 25 27 29] Epoch 00001: val_categorical_accuracy improved from -inf to 0.21000, saving model to Conv3D_3_1_2021-05-2705_25_13.353350/model-00001-1.44459-0.40196-1.80248-0.21000.h5 21/21 [==============================] - 48s 2s/step - loss: 1.4446 - categorical_accuracy: 0.4020 - val_loss: 1.8025 - val_categorical_accuracy: 0.2100 Epoch 2/20 21/21 [==============================] - ETA: 0s - loss: 0.8626 - categorical_accuracy: 0.6719 Epoch 00002: val_categorical_accuracy improved from 0.21000 to 0.23000, saving model to Conv3D_3_1_2021-05-2705_25_13.353350/model-00002-0.86261-0.67195-2.54102-0.23000.h5 21/21 [==============================] - 47s 2s/step - loss: 0.8626 - categorical_accuracy: 0.6719 - val_loss: 2.5410 - val_categorical_accuracy: 0.2300 Epoch 3/20 21/21 [==============================] - ETA: 0s - loss: 0.5549 - categorical_accuracy: 0.8115 Epoch 00003: val_categorical_accuracy did not improve from 0.23000 21/21 [==============================] - 47s 2s/step - loss: 0.5549 - categorical_accuracy: 0.8115 - val_loss: 3.7548 - val_categorical_accuracy: 0.2200 Epoch 4/20 21/21 [==============================] - ETA: 0s - loss: 0.4007 - categorical_accuracy: 0.8673 Epoch 00004: val_categorical_accuracy did not improve from 0.23000 21/21 [==============================] - 46s 2s/step - loss: 0.4007 - categorical_accuracy: 0.8673 - val_loss: 4.4041 - val_categorical_accuracy: 0.2200 Epoch 5/20 21/21 [==============================] - ETA: 0s - loss: 0.2599 - categorical_accuracy: 0.9216 Epoch 00005: val_categorical_accuracy did not improve from 0.23000 Epoch 00005: ReduceLROnPlateau reducing learning rate to 
0.00020000000949949026. 21/21 [==============================] - 47s 2s/step - loss: 0.2599 - categorical_accuracy: 0.9216 - val_loss: 5.6711 - val_categorical_accuracy: 0.2200 Epoch 6/20 21/21 [==============================] - ETA: 0s - loss: 0.1941 - categorical_accuracy: 0.9540 Epoch 00006: val_categorical_accuracy improved from 0.23000 to 0.30000, saving model to Conv3D_3_1_2021-05-2705_25_13.353350/model-00006-0.19411-0.95400-5.73485-0.30000.h5 21/21 [==============================] - 47s 2s/step - loss: 0.1941 - categorical_accuracy: 0.9540 - val_loss: 5.7349 - val_categorical_accuracy: 0.3000 Epoch 7/20 21/21 [==============================] - ETA: 0s - loss: 0.1529 - categorical_accuracy: 0.9668 Epoch 00007: val_categorical_accuracy did not improve from 0.30000 21/21 [==============================] - 47s 2s/step - loss: 0.1529 - categorical_accuracy: 0.9668 - val_loss: 5.8719 - val_categorical_accuracy: 0.2900 Epoch 8/20 21/21 [==============================] - ETA: 0s - loss: 0.1253 - categorical_accuracy: 0.9729 Epoch 00008: val_categorical_accuracy did not improve from 0.30000 21/21 [==============================] - 46s 2s/step - loss: 0.1253 - categorical_accuracy: 0.9729 - val_loss: 5.7937 - val_categorical_accuracy: 0.2500 Epoch 9/20 21/21 [==============================] - ETA: 0s - loss: 0.1090 - categorical_accuracy: 0.9811 Epoch 00009: val_categorical_accuracy did not improve from 0.30000 Epoch 00009: ReduceLROnPlateau reducing learning rate to 4.0000001899898055e-05. 
21/21 [==============================] - 47s 2s/step - loss: 0.1090 - categorical_accuracy: 0.9811 - val_loss: 5.8585 - val_categorical_accuracy: 0.2800 Epoch 10/20 21/21 [==============================] - ETA: 0s - loss: 0.0845 - categorical_accuracy: 0.9902 Epoch 00010: val_categorical_accuracy did not improve from 0.30000 21/21 [==============================] - 47s 2s/step - loss: 0.0845 - categorical_accuracy: 0.9902 - val_loss: 6.1107 - val_categorical_accuracy: 0.2400 Epoch 11/20 21/21 [==============================] - ETA: 0s - loss: 0.0843 - categorical_accuracy: 0.9872 Epoch 00011: val_categorical_accuracy improved from 0.30000 to 0.34000, saving model to Conv3D_3_1_2021-05-2705_25_13.353350/model-00011-0.08431-0.98718-5.03659-0.34000.h5 21/21 [==============================] - 47s 2s/step - loss: 0.0843 - categorical_accuracy: 0.9872 - val_loss: 5.0366 - val_categorical_accuracy: 0.3400 Epoch 00011: early stopping
# Model 4 - Conv3D Model with filter size (3,3,3) without Normalization
# Reuses the model_Conv3D_3 architecture; the only change from the model-3
# run is normalization = False (generator skips the divide-by-255 step).
frame_height = 120
frame_width = 120
num_frames = 16
size_batch = 32
num_epochs = 20
augment = True  # data augmentation stays on, as in the model-3 run
normalization = False  # ablation: feed raw pixel values to the network
model = model_Conv3D_3(num_frames, frame_height, frame_width)
optimiser = optimizers.Adam()
model.compile(optimizer = optimiser, loss = 'categorical_crossentropy', metrics = ['categorical_accuracy'])
print(model.summary())
# Run the model and check accuracy
model_history = trainer(model, 'Conv3D_3_2', num_epochs, size_batch, num_frames, frame_height, frame_width, augment, normalization)
modelplot(model_history)
Model: "sequential_5" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv3d_20 (Conv3D) (None, 16, 120, 120, 16) 1312 _________________________________________________________________ activation_33 (Activation) (None, 16, 120, 120, 16) 0 _________________________________________________________________ batch_normalization_25 (Batc (None, 16, 120, 120, 16) 64 _________________________________________________________________ max_pooling3d_20 (MaxPooling (None, 8, 60, 60, 16) 0 _________________________________________________________________ conv3d_21 (Conv3D) (None, 8, 60, 60, 32) 13856 _________________________________________________________________ activation_34 (Activation) (None, 8, 60, 60, 32) 0 _________________________________________________________________ batch_normalization_26 (Batc (None, 8, 60, 60, 32) 128 _________________________________________________________________ max_pooling3d_21 (MaxPooling (None, 4, 30, 30, 32) 0 _________________________________________________________________ conv3d_22 (Conv3D) (None, 4, 30, 30, 64) 55360 _________________________________________________________________ activation_35 (Activation) (None, 4, 30, 30, 64) 0 _________________________________________________________________ batch_normalization_27 (Batc (None, 4, 30, 30, 64) 256 _________________________________________________________________ max_pooling3d_22 (MaxPooling (None, 2, 15, 15, 64) 0 _________________________________________________________________ conv3d_23 (Conv3D) (None, 2, 15, 15, 128) 221312 _________________________________________________________________ activation_36 (Activation) (None, 2, 15, 15, 128) 0 _________________________________________________________________ batch_normalization_28 (Batc (None, 2, 15, 15, 128) 512 _________________________________________________________________ max_pooling3d_23 (MaxPooling (None, 1, 
7, 7, 128) 0 _________________________________________________________________ flatten_5 (Flatten) (None, 6272) 0 _________________________________________________________________ dense_13 (Dense) (None, 64) 401472 _________________________________________________________________ activation_37 (Activation) (None, 64) 0 _________________________________________________________________ batch_normalization_29 (Batc (None, 64) 256 _________________________________________________________________ dropout_8 (Dropout) (None, 64) 0 _________________________________________________________________ dense_14 (Dense) (None, 128) 8320 _________________________________________________________________ activation_38 (Activation) (None, 128) 0 _________________________________________________________________ dropout_9 (Dropout) (None, 128) 0 _________________________________________________________________ dense_15 (Dense) (None, 5) 645 _________________________________________________________________ activation_39 (Activation) (None, 5) 0 ================================================================= Total params: 703,493 Trainable params: 702,885 Non-trainable params: 608 _________________________________________________________________ None WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen. 
Source path = /datasets/Project_data/train ; batch size = 32 Image Indexes: [ 0 2 4 6 8 10 12 14 15 17 19 21 23 25 27 29] Epoch 1/20 21/21 [==============================] - ETA: 0s - loss: 1.4919 - categorical_accuracy: 0.3974Source path = /datasets/Project_data/val ; batch size = 32 Image Indexes: [ 0 2 4 6 8 10 12 14 15 17 19 21 23 25 27 29] Epoch 00001: val_categorical_accuracy improved from -inf to 0.23000, saving model to Conv3D_3_2_2021-05-2705_33_58.719379/model-00001-1.49194-0.39744-13.21899-0.23000.h5 21/21 [==============================] - 47s 2s/step - loss: 1.4919 - categorical_accuracy: 0.3974 - val_loss: 13.2190 - val_categorical_accuracy: 0.2300 Epoch 2/20 21/21 [==============================] - ETA: 0s - loss: 0.9764 - categorical_accuracy: 0.6282 Epoch 00002: val_categorical_accuracy improved from 0.23000 to 0.26000, saving model to Conv3D_3_2_2021-05-2705_33_58.719379/model-00002-0.97638-0.62821-5.65396-0.26000.h5 21/21 [==============================] - 46s 2s/step - loss: 0.9764 - categorical_accuracy: 0.6282 - val_loss: 5.6540 - val_categorical_accuracy: 0.2600 Epoch 3/20 21/21 [==============================] - ETA: 0s - loss: 0.6192 - categorical_accuracy: 0.7670 Epoch 00003: val_categorical_accuracy improved from 0.26000 to 0.38000, saving model to Conv3D_3_2_2021-05-2705_33_58.719379/model-00003-0.61918-0.76697-2.79129-0.38000.h5 21/21 [==============================] - 46s 2s/step - loss: 0.6192 - categorical_accuracy: 0.7670 - val_loss: 2.7913 - val_categorical_accuracy: 0.3800 Epoch 4/20 21/21 [==============================] - ETA: 0s - loss: 0.4394 - categorical_accuracy: 0.8560 Epoch 00004: val_categorical_accuracy improved from 0.38000 to 0.46000, saving model to Conv3D_3_2_2021-05-2705_33_58.719379/model-00004-0.43937-0.85596-1.42502-0.46000.h5 21/21 [==============================] - 44s 2s/step - loss: 0.4394 - categorical_accuracy: 0.8560 - val_loss: 1.4250 - val_categorical_accuracy: 0.4600 Epoch 5/20 21/21 
[==============================] - ETA: 0s - loss: 0.3066 - categorical_accuracy: 0.9125 Epoch 00005: val_categorical_accuracy improved from 0.46000 to 0.73000, saving model to Conv3D_3_2_2021-05-2705_33_58.719379/model-00005-0.30660-0.91252-0.69536-0.73000.h5 21/21 [==============================] - 45s 2s/step - loss: 0.3066 - categorical_accuracy: 0.9125 - val_loss: 0.6954 - val_categorical_accuracy: 0.7300 Epoch 6/20 21/21 [==============================] - ETA: 0s - loss: 0.1969 - categorical_accuracy: 0.9457 Epoch 00006: val_categorical_accuracy did not improve from 0.73000 21/21 [==============================] - 46s 2s/step - loss: 0.1969 - categorical_accuracy: 0.9457 - val_loss: 0.8776 - val_categorical_accuracy: 0.6300 Epoch 7/20 21/21 [==============================] - ETA: 0s - loss: 0.1502 - categorical_accuracy: 0.9600 Epoch 00007: val_categorical_accuracy did not improve from 0.73000 21/21 [==============================] - 46s 2s/step - loss: 0.1502 - categorical_accuracy: 0.9600 - val_loss: 1.1830 - val_categorical_accuracy: 0.5900 Epoch 8/20 21/21 [==============================] - ETA: 0s - loss: 0.1205 - categorical_accuracy: 0.9729 Epoch 00008: val_categorical_accuracy did not improve from 0.73000 21/21 [==============================] - 45s 2s/step - loss: 0.1205 - categorical_accuracy: 0.9729 - val_loss: 0.7093 - val_categorical_accuracy: 0.7300 Epoch 9/20 21/21 [==============================] - ETA: 0s - loss: 0.0945 - categorical_accuracy: 0.9766 Epoch 00009: val_categorical_accuracy improved from 0.73000 to 0.80000, saving model to Conv3D_3_2_2021-05-2705_33_58.719379/model-00009-0.09449-0.97662-0.47436-0.80000.h5 21/21 [==============================] - 45s 2s/step - loss: 0.0945 - categorical_accuracy: 0.9766 - val_loss: 0.4744 - val_categorical_accuracy: 0.8000 Epoch 10/20 21/21 [==============================] - ETA: 0s - loss: 0.0917 - categorical_accuracy: 0.9789 Epoch 00010: val_categorical_accuracy did not improve from 0.80000 
21/21 [==============================] - 46s 2s/step - loss: 0.0917 - categorical_accuracy: 0.9789 - val_loss: 0.7147 - val_categorical_accuracy: 0.7200 Epoch 11/20 21/21 [==============================] - ETA: 0s - loss: 0.0702 - categorical_accuracy: 0.9804 Epoch 00011: val_categorical_accuracy improved from 0.80000 to 0.82000, saving model to Conv3D_3_2_2021-05-2705_33_58.719379/model-00011-0.07020-0.98039-0.49404-0.82000.h5 21/21 [==============================] - 46s 2s/step - loss: 0.0702 - categorical_accuracy: 0.9804 - val_loss: 0.4940 - val_categorical_accuracy: 0.8200 Epoch 12/20 21/21 [==============================] - ETA: 0s - loss: 0.0461 - categorical_accuracy: 0.9902 Epoch 00012: val_categorical_accuracy improved from 0.82000 to 0.91000, saving model to Conv3D_3_2_2021-05-2705_33_58.719379/model-00012-0.04612-0.99020-0.27720-0.91000.h5 21/21 [==============================] - 44s 2s/step - loss: 0.0461 - categorical_accuracy: 0.9902 - val_loss: 0.2772 - val_categorical_accuracy: 0.9100 Epoch 13/20 21/21 [==============================] - ETA: 0s - loss: 0.0313 - categorical_accuracy: 0.9940 Epoch 00013: val_categorical_accuracy improved from 0.91000 to 0.92000, saving model to Conv3D_3_2_2021-05-2705_33_58.719379/model-00013-0.03128-0.99397-0.27023-0.92000.h5 21/21 [==============================] - 46s 2s/step - loss: 0.0313 - categorical_accuracy: 0.9940 - val_loss: 0.2702 - val_categorical_accuracy: 0.9200 Epoch 14/20 21/21 [==============================] - ETA: 0s - loss: 0.0428 - categorical_accuracy: 0.9864 Epoch 00014: val_categorical_accuracy did not improve from 0.92000 21/21 [==============================] - 46s 2s/step - loss: 0.0428 - categorical_accuracy: 0.9864 - val_loss: 0.5556 - val_categorical_accuracy: 0.8400 Epoch 15/20 21/21 [==============================] - ETA: 0s - loss: 0.0484 - categorical_accuracy: 0.9842 Epoch 00015: val_categorical_accuracy did not improve from 0.92000 21/21 [==============================] - 46s 
2s/step - loss: 0.0484 - categorical_accuracy: 0.9842 - val_loss: 1.2308 - val_categorical_accuracy: 0.7200 Epoch 16/20 21/21 [==============================] - ETA: 0s - loss: 0.0485 - categorical_accuracy: 0.9864 Epoch 00016: val_categorical_accuracy did not improve from 0.92000 21/21 [==============================] - 44s 2s/step - loss: 0.0485 - categorical_accuracy: 0.9864 - val_loss: 0.5842 - val_categorical_accuracy: 0.8200 Epoch 17/20 21/21 [==============================] - ETA: 0s - loss: 0.0489 - categorical_accuracy: 0.9872 Epoch 00017: val_categorical_accuracy did not improve from 0.92000 Epoch 00017: ReduceLROnPlateau reducing learning rate to 0.00020000000949949026. 21/21 [==============================] - 46s 2s/step - loss: 0.0489 - categorical_accuracy: 0.9872 - val_loss: 1.2473 - val_categorical_accuracy: 0.7100 Epoch 18/20 21/21 [==============================] - ETA: 0s - loss: 0.0355 - categorical_accuracy: 0.9925 Epoch 00018: val_categorical_accuracy did not improve from 0.92000 21/21 [==============================] - 46s 2s/step - loss: 0.0355 - categorical_accuracy: 0.9925 - val_loss: 0.7195 - val_categorical_accuracy: 0.7800 Epoch 19/20 21/21 [==============================] - ETA: 0s - loss: 0.0332 - categorical_accuracy: 0.9887 Epoch 00019: val_categorical_accuracy did not improve from 0.92000 21/21 [==============================] - 46s 2s/step - loss: 0.0332 - categorical_accuracy: 0.9887 - val_loss: 0.7630 - val_categorical_accuracy: 0.7700 Epoch 20/20 21/21 [==============================] - ETA: 0s - loss: 0.0211 - categorical_accuracy: 0.9977 Epoch 00020: val_categorical_accuracy did not improve from 0.92000 21/21 [==============================] - 44s 2s/step - loss: 0.0211 - categorical_accuracy: 0.9977 - val_loss: 0.4307 - val_categorical_accuracy: 0.8800
The model reached 92% validation accuracy and 99% training accuracy.

# Model 5 - Conv3D Model with filter size (3,3,3)
# Model 5: identical architecture and settings to the previous run,
# but trained with the SGD optimizer instead of Adam.
frame_height = 120   # input frame height in pixels
frame_width = 120    # input frame width in pixels
num_frames = 16      # frames sampled per gesture video
size_batch = 32      # generator batch size
num_epochs = 20      # training epochs
augment = True       # enable data augmentation
normalization = False  # skip per-frame normalization

# Build the (3,3,3)-filter Conv3D network and compile it with plain SGD.
model = model_Conv3D_3(num_frames, frame_height, frame_width)
model.compile(
    optimizer=optimizers.SGD(),
    loss='categorical_crossentropy',
    metrics=['categorical_accuracy'],
)
print(model.summary())

# Train the model and plot the accuracy/loss curves.
model_history = trainer(model, 'Conv3D_3_3', num_epochs, size_batch,
                        num_frames, frame_height, frame_width,
                        augment, normalization)
modelplot(model_history)
Model: "sequential_6" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv3d_24 (Conv3D) (None, 16, 120, 120, 16) 1312 _________________________________________________________________ activation_40 (Activation) (None, 16, 120, 120, 16) 0 _________________________________________________________________ batch_normalization_30 (Batc (None, 16, 120, 120, 16) 64 _________________________________________________________________ max_pooling3d_24 (MaxPooling (None, 8, 60, 60, 16) 0 _________________________________________________________________ conv3d_25 (Conv3D) (None, 8, 60, 60, 32) 13856 _________________________________________________________________ activation_41 (Activation) (None, 8, 60, 60, 32) 0 _________________________________________________________________ batch_normalization_31 (Batc (None, 8, 60, 60, 32) 128 _________________________________________________________________ max_pooling3d_25 (MaxPooling (None, 4, 30, 30, 32) 0 _________________________________________________________________ conv3d_26 (Conv3D) (None, 4, 30, 30, 64) 55360 _________________________________________________________________ activation_42 (Activation) (None, 4, 30, 30, 64) 0 _________________________________________________________________ batch_normalization_32 (Batc (None, 4, 30, 30, 64) 256 _________________________________________________________________ max_pooling3d_26 (MaxPooling (None, 2, 15, 15, 64) 0 _________________________________________________________________ conv3d_27 (Conv3D) (None, 2, 15, 15, 128) 221312 _________________________________________________________________ activation_43 (Activation) (None, 2, 15, 15, 128) 0 _________________________________________________________________ batch_normalization_33 (Batc (None, 2, 15, 15, 128) 512 _________________________________________________________________ max_pooling3d_27 (MaxPooling (None, 1, 
7, 7, 128) 0 _________________________________________________________________ flatten_6 (Flatten) (None, 6272) 0 _________________________________________________________________ dense_16 (Dense) (None, 64) 401472 _________________________________________________________________ activation_44 (Activation) (None, 64) 0 _________________________________________________________________ batch_normalization_34 (Batc (None, 64) 256 _________________________________________________________________ dropout_10 (Dropout) (None, 64) 0 _________________________________________________________________ dense_17 (Dense) (None, 128) 8320 _________________________________________________________________ activation_45 (Activation) (None, 128) 0 _________________________________________________________________ dropout_11 (Dropout) (None, 128) 0 _________________________________________________________________ dense_18 (Dense) (None, 5) 645 _________________________________________________________________ activation_46 (Activation) (None, 5) 0 ================================================================= Total params: 703,493 Trainable params: 702,885 Non-trainable params: 608 _________________________________________________________________ None WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen. 
Source path = /datasets/Project_data/train ; batch size = 32 Image Indexes: [ 0 2 4 6 8 10 12 14 15 17 19 21 23 25 27 29] Epoch 1/20 21/21 [==============================] - ETA: 0s - loss: 1.6818 - categorical_accuracy: 0.3281Source path = /datasets/Project_data/val ; batch size = 32 Image Indexes: [ 0 2 4 6 8 10 12 14 15 17 19 21 23 25 27 29] Epoch 00001: val_categorical_accuracy improved from -inf to 0.23000, saving model to Conv3D_3_3_2021-05-2705_49_21.062337/model-00001-1.68179-0.32805-3.58926-0.23000.h5 21/21 [==============================] - 45s 2s/step - loss: 1.6818 - categorical_accuracy: 0.3281 - val_loss: 3.5893 - val_categorical_accuracy: 0.2300 Epoch 2/20 21/21 [==============================] - ETA: 0s - loss: 1.2204 - categorical_accuracy: 0.4977 Epoch 00002: val_categorical_accuracy improved from 0.23000 to 0.27000, saving model to Conv3D_3_3_2021-05-2705_49_21.062337/model-00002-1.22040-0.49774-2.02215-0.27000.h5 21/21 [==============================] - 46s 2s/step - loss: 1.2204 - categorical_accuracy: 0.4977 - val_loss: 2.0221 - val_categorical_accuracy: 0.2700 Epoch 3/20 21/21 [==============================] - ETA: 0s - loss: 1.0326 - categorical_accuracy: 0.6094 Epoch 00003: val_categorical_accuracy improved from 0.27000 to 0.30000, saving model to Conv3D_3_3_2021-05-2705_49_21.062337/model-00003-1.03263-0.60935-1.46958-0.30000.h5 21/21 [==============================] - 46s 2s/step - loss: 1.0326 - categorical_accuracy: 0.6094 - val_loss: 1.4696 - val_categorical_accuracy: 0.3000 Epoch 4/20 21/21 [==============================] - ETA: 0s - loss: 0.8375 - categorical_accuracy: 0.7006 Epoch 00004: val_categorical_accuracy improved from 0.30000 to 0.45000, saving model to Conv3D_3_3_2021-05-2705_49_21.062337/model-00004-0.83752-0.70060-1.27638-0.45000.h5 21/21 [==============================] - 44s 2s/step - loss: 0.8375 - categorical_accuracy: 0.7006 - val_loss: 1.2764 - val_categorical_accuracy: 0.4500 Epoch 5/20 21/21 
[==============================] - ETA: 0s - loss: 0.7124 - categorical_accuracy: 0.7526 Epoch 00005: val_categorical_accuracy improved from 0.45000 to 0.58000, saving model to Conv3D_3_3_2021-05-2705_49_21.062337/model-00005-0.71242-0.75264-1.05466-0.58000.h5 21/21 [==============================] - 46s 2s/step - loss: 0.7124 - categorical_accuracy: 0.7526 - val_loss: 1.0547 - val_categorical_accuracy: 0.5800 Epoch 6/20 21/21 [==============================] - ETA: 0s - loss: 0.6144 - categorical_accuracy: 0.7768 Epoch 00006: val_categorical_accuracy did not improve from 0.58000 21/21 [==============================] - 46s 2s/step - loss: 0.6144 - categorical_accuracy: 0.7768 - val_loss: 1.0882 - val_categorical_accuracy: 0.5200 Epoch 7/20 21/21 [==============================] - ETA: 0s - loss: 0.5131 - categorical_accuracy: 0.8265 Epoch 00007: val_categorical_accuracy did not improve from 0.58000 21/21 [==============================] - 46s 2s/step - loss: 0.5131 - categorical_accuracy: 0.8265 - val_loss: 0.9604 - val_categorical_accuracy: 0.5700 Epoch 8/20 21/21 [==============================] - ETA: 0s - loss: 0.3925 - categorical_accuracy: 0.8959 Epoch 00008: val_categorical_accuracy improved from 0.58000 to 0.59000, saving model to Conv3D_3_3_2021-05-2705_49_21.062337/model-00008-0.39245-0.89593-0.95087-0.59000.h5 21/21 [==============================] - 44s 2s/step - loss: 0.3925 - categorical_accuracy: 0.8959 - val_loss: 0.9509 - val_categorical_accuracy: 0.5900 Epoch 9/20 21/21 [==============================] - ETA: 0s - loss: 0.3561 - categorical_accuracy: 0.8997 Epoch 00009: val_categorical_accuracy improved from 0.59000 to 0.60000, saving model to Conv3D_3_3_2021-05-2705_49_21.062337/model-00009-0.35613-0.89970-0.92183-0.60000.h5 21/21 [==============================] - 46s 2s/step - loss: 0.3561 - categorical_accuracy: 0.8997 - val_loss: 0.9218 - val_categorical_accuracy: 0.6000 Epoch 10/20 21/21 [==============================] - ETA: 0s - loss: 
0.2717 - categorical_accuracy: 0.9457 Epoch 00010: val_categorical_accuracy improved from 0.60000 to 0.87000, saving model to Conv3D_3_3_2021-05-2705_49_21.062337/model-00010-0.27172-0.94570-0.50760-0.87000.h5 21/21 [==============================] - 45s 2s/step - loss: 0.2717 - categorical_accuracy: 0.9457 - val_loss: 0.5076 - val_categorical_accuracy: 0.8700 Epoch 11/20 21/21 [==============================] - ETA: 0s - loss: 0.2584 - categorical_accuracy: 0.9359 Epoch 00011: val_categorical_accuracy did not improve from 0.87000 21/21 [==============================] - 46s 2s/step - loss: 0.2584 - categorical_accuracy: 0.9359 - val_loss: 0.6368 - val_categorical_accuracy: 0.7600 Epoch 12/20 21/21 [==============================] - ETA: 0s - loss: 0.2132 - categorical_accuracy: 0.9517 Epoch 00012: val_categorical_accuracy did not improve from 0.87000 21/21 [==============================] - 44s 2s/step - loss: 0.2132 - categorical_accuracy: 0.9517 - val_loss: 0.9826 - val_categorical_accuracy: 0.6500 Epoch 13/20 21/21 [==============================] - ETA: 0s - loss: 0.1879 - categorical_accuracy: 0.9630 Epoch 00013: val_categorical_accuracy did not improve from 0.87000 21/21 [==============================] - 45s 2s/step - loss: 0.1879 - categorical_accuracy: 0.9630 - val_loss: 0.8094 - val_categorical_accuracy: 0.7100 Epoch 14/20 21/21 [==============================] - ETA: 0s - loss: 0.1537 - categorical_accuracy: 0.9751 Epoch 00014: val_categorical_accuracy did not improve from 0.87000 Epoch 00014: ReduceLROnPlateau reducing learning rate to 0.0019999999552965165. 
21/21 [==============================] - 46s 2s/step - loss: 0.1537 - categorical_accuracy: 0.9751 - val_loss: 0.6507 - val_categorical_accuracy: 0.7600 Epoch 15/20 21/21 [==============================] - ETA: 0s - loss: 0.1312 - categorical_accuracy: 0.9796 Epoch 00015: val_categorical_accuracy improved from 0.87000 to 0.90000, saving model to Conv3D_3_3_2021-05-2705_49_21.062337/model-00015-0.13124-0.97964-0.42695-0.90000.h5 21/21 [==============================] - 45s 2s/step - loss: 0.1312 - categorical_accuracy: 0.9796 - val_loss: 0.4270 - val_categorical_accuracy: 0.9000 Epoch 16/20 21/21 [==============================] - ETA: 0s - loss: 0.1162 - categorical_accuracy: 0.9894 Epoch 00016: val_categorical_accuracy did not improve from 0.90000 21/21 [==============================] - 44s 2s/step - loss: 0.1162 - categorical_accuracy: 0.9894 - val_loss: 0.4118 - val_categorical_accuracy: 0.8700 Epoch 17/20 21/21 [==============================] - ETA: 0s - loss: 0.1197 - categorical_accuracy: 0.9872 Epoch 00017: val_categorical_accuracy did not improve from 0.90000 21/21 [==============================] - 45s 2s/step - loss: 0.1197 - categorical_accuracy: 0.9872 - val_loss: 0.4028 - val_categorical_accuracy: 0.8700 Epoch 18/20 21/21 [==============================] - ETA: 0s - loss: 0.1074 - categorical_accuracy: 0.9894 Epoch 00018: val_categorical_accuracy did not improve from 0.90000 21/21 [==============================] - 45s 2s/step - loss: 0.1074 - categorical_accuracy: 0.9894 - val_loss: 0.3367 - val_categorical_accuracy: 0.8900 Epoch 19/20 21/21 [==============================] - ETA: 0s - loss: 0.1145 - categorical_accuracy: 0.9849 Epoch 00019: val_categorical_accuracy did not improve from 0.90000 21/21 [==============================] - 45s 2s/step - loss: 0.1145 - categorical_accuracy: 0.9849 - val_loss: 0.4788 - val_categorical_accuracy: 0.8200 Epoch 20/20 21/21 [==============================] - ETA: 0s - loss: 0.0963 - categorical_accuracy: 0.9917 
Epoch 00020: val_categorical_accuracy did not improve from 0.90000 21/21 [==============================] - 44s 2s/step - loss: 0.0963 - categorical_accuracy: 0.9917 - val_loss: 0.3368 - val_categorical_accuracy: 0.8900
Results of above model: best validation accuracy of 90% with ~99% training accuracy (SGD optimizer, 120x120 frames).
# Model 6: Conv3D network with (3,3,3) filters, augmentation enabled,
# no normalization, and a smaller 100x100 frame size (Adam optimizer).
frame_height = 100   # input frame height in pixels
frame_width = 100    # input frame width in pixels
num_frames = 16      # frames sampled per gesture video
size_batch = 32      # generator batch size
num_epochs = 20      # training epochs
augment = True       # enable data augmentation
normalization = False  # skip per-frame normalization

# Build the network at the reduced resolution and compile with Adam.
model = model_Conv3D_3(num_frames, frame_height, frame_width)
model.compile(
    optimizer=optimizers.Adam(),
    loss='categorical_crossentropy',
    metrics=['categorical_accuracy'],
)
print(model.summary())

# Train the model and plot the accuracy/loss curves.
model_history = trainer(model, 'Conv3D_3_4', num_epochs, size_batch,
                        num_frames, frame_height, frame_width,
                        augment, normalization)
modelplot(model_history)
Model: "sequential_7" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv3d_28 (Conv3D) (None, 16, 100, 100, 16) 1312 _________________________________________________________________ activation_47 (Activation) (None, 16, 100, 100, 16) 0 _________________________________________________________________ batch_normalization_35 (Batc (None, 16, 100, 100, 16) 64 _________________________________________________________________ max_pooling3d_28 (MaxPooling (None, 8, 50, 50, 16) 0 _________________________________________________________________ conv3d_29 (Conv3D) (None, 8, 50, 50, 32) 13856 _________________________________________________________________ activation_48 (Activation) (None, 8, 50, 50, 32) 0 _________________________________________________________________ batch_normalization_36 (Batc (None, 8, 50, 50, 32) 128 _________________________________________________________________ max_pooling3d_29 (MaxPooling (None, 4, 25, 25, 32) 0 _________________________________________________________________ conv3d_30 (Conv3D) (None, 4, 25, 25, 64) 55360 _________________________________________________________________ activation_49 (Activation) (None, 4, 25, 25, 64) 0 _________________________________________________________________ batch_normalization_37 (Batc (None, 4, 25, 25, 64) 256 _________________________________________________________________ max_pooling3d_30 (MaxPooling (None, 2, 12, 12, 64) 0 _________________________________________________________________ conv3d_31 (Conv3D) (None, 2, 12, 12, 128) 221312 _________________________________________________________________ activation_50 (Activation) (None, 2, 12, 12, 128) 0 _________________________________________________________________ batch_normalization_38 (Batc (None, 2, 12, 12, 128) 512 _________________________________________________________________ max_pooling3d_31 (MaxPooling (None, 1, 
6, 6, 128) 0 _________________________________________________________________ flatten_7 (Flatten) (None, 4608) 0 _________________________________________________________________ dense_19 (Dense) (None, 64) 294976 _________________________________________________________________ activation_51 (Activation) (None, 64) 0 _________________________________________________________________ batch_normalization_39 (Batc (None, 64) 256 _________________________________________________________________ dropout_12 (Dropout) (None, 64) 0 _________________________________________________________________ dense_20 (Dense) (None, 128) 8320 _________________________________________________________________ activation_52 (Activation) (None, 128) 0 _________________________________________________________________ dropout_13 (Dropout) (None, 128) 0 _________________________________________________________________ dense_21 (Dense) (None, 5) 645 _________________________________________________________________ activation_53 (Activation) (None, 5) 0 ================================================================= Total params: 596,997 Trainable params: 596,389 Non-trainable params: 608 _________________________________________________________________ None WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen. 
Source path = /datasets/Project_data/train ; batch size = 32 Image Indexes: [ 0 2 4 6 8 10 12 14 15 17 19 21 23 25 27 29] Epoch 1/20 21/21 [==============================] - ETA: 0s - loss: 1.4289 - categorical_accuracy: 0.4005Source path = /datasets/Project_data/val ; batch size = 32 Image Indexes: [ 0 2 4 6 8 10 12 14 15 17 19 21 23 25 27 29] Epoch 00001: val_categorical_accuracy improved from -inf to 0.23000, saving model to Conv3D_3_4_2021-05-2706_04_33.426971/model-00001-1.42889-0.40045-45.87391-0.23000.h5 21/21 [==============================] - 46s 2s/step - loss: 1.4289 - categorical_accuracy: 0.4005 - val_loss: 45.8739 - val_categorical_accuracy: 0.2300 Epoch 2/20 21/21 [==============================] - ETA: 0s - loss: 1.0719 - categorical_accuracy: 0.5852 Epoch 00002: val_categorical_accuracy did not improve from 0.23000 21/21 [==============================] - 45s 2s/step - loss: 1.0719 - categorical_accuracy: 0.5852 - val_loss: 16.4185 - val_categorical_accuracy: 0.1600 Epoch 3/20 21/21 [==============================] - ETA: 0s - loss: 0.7507 - categorical_accuracy: 0.7134 Epoch 00003: val_categorical_accuracy improved from 0.23000 to 0.31000, saving model to Conv3D_3_4_2021-05-2706_04_33.426971/model-00003-0.75070-0.71342-3.90809-0.31000.h5 21/21 [==============================] - 45s 2s/step - loss: 0.7507 - categorical_accuracy: 0.7134 - val_loss: 3.9081 - val_categorical_accuracy: 0.3100 Epoch 4/20 21/21 [==============================] - ETA: 0s - loss: 0.5242 - categorical_accuracy: 0.8077 Epoch 00004: val_categorical_accuracy improved from 0.31000 to 0.37000, saving model to Conv3D_3_4_2021-05-2706_04_33.426971/model-00004-0.52416-0.80769-2.41075-0.37000.h5 21/21 [==============================] - 43s 2s/step - loss: 0.5242 - categorical_accuracy: 0.8077 - val_loss: 2.4107 - val_categorical_accuracy: 0.3700 Epoch 5/20 21/21 [==============================] - ETA: 0s - loss: 0.4035 - categorical_accuracy: 0.8582 Epoch 00005: 
val_categorical_accuracy improved from 0.37000 to 0.45000, saving model to Conv3D_3_4_2021-05-2706_04_33.426971/model-00005-0.40352-0.85822-1.84205-0.45000.h5 21/21 [==============================] - 46s 2s/step - loss: 0.4035 - categorical_accuracy: 0.8582 - val_loss: 1.8420 - val_categorical_accuracy: 0.4500 Epoch 6/20 21/21 [==============================] - ETA: 0s - loss: 0.2816 - categorical_accuracy: 0.9050 Epoch 00006: val_categorical_accuracy improved from 0.45000 to 0.47000, saving model to Conv3D_3_4_2021-05-2706_04_33.426971/model-00006-0.28159-0.90498-1.82726-0.47000.h5 21/21 [==============================] - 44s 2s/step - loss: 0.2816 - categorical_accuracy: 0.9050 - val_loss: 1.8273 - val_categorical_accuracy: 0.4700 Epoch 7/20 21/21 [==============================] - ETA: 0s - loss: 0.1907 - categorical_accuracy: 0.9382 Epoch 00007: val_categorical_accuracy improved from 0.47000 to 0.73000, saving model to Conv3D_3_4_2021-05-2706_04_33.426971/model-00007-0.19074-0.93816-0.79164-0.73000.h5 21/21 [==============================] - 45s 2s/step - loss: 0.1907 - categorical_accuracy: 0.9382 - val_loss: 0.7916 - val_categorical_accuracy: 0.7300 Epoch 8/20 21/21 [==============================] - ETA: 0s - loss: 0.1271 - categorical_accuracy: 0.9653 Epoch 00008: val_categorical_accuracy did not improve from 0.73000 21/21 [==============================] - 43s 2s/step - loss: 0.1271 - categorical_accuracy: 0.9653 - val_loss: 0.6527 - val_categorical_accuracy: 0.7000 Epoch 9/20 21/21 [==============================] - ETA: 0s - loss: 0.0954 - categorical_accuracy: 0.9729 Epoch 00009: val_categorical_accuracy improved from 0.73000 to 0.75000, saving model to Conv3D_3_4_2021-05-2706_04_33.426971/model-00009-0.09544-0.97285-0.59760-0.75000.h5 21/21 [==============================] - 45s 2s/step - loss: 0.0954 - categorical_accuracy: 0.9729 - val_loss: 0.5976 - val_categorical_accuracy: 0.7500 Epoch 10/20 21/21 [==============================] - ETA: 0s - loss: 
0.1088 - categorical_accuracy: 0.9668 Epoch 00010: val_categorical_accuracy improved from 0.75000 to 0.77000, saving model to Conv3D_3_4_2021-05-2706_04_33.426971/model-00010-0.10883-0.96682-0.80951-0.77000.h5 21/21 [==============================] - 45s 2s/step - loss: 0.1088 - categorical_accuracy: 0.9668 - val_loss: 0.8095 - val_categorical_accuracy: 0.7700 Epoch 11/20 21/21 [==============================] - ETA: 0s - loss: 0.0974 - categorical_accuracy: 0.9744 Epoch 00011: val_categorical_accuracy improved from 0.77000 to 0.87000, saving model to Conv3D_3_4_2021-05-2706_04_33.426971/model-00011-0.09742-0.97436-0.37085-0.87000.h5 21/21 [==============================] - 46s 2s/step - loss: 0.0974 - categorical_accuracy: 0.9744 - val_loss: 0.3708 - val_categorical_accuracy: 0.8700 Epoch 12/20 21/21 [==============================] - ETA: 0s - loss: 0.0789 - categorical_accuracy: 0.9796 Epoch 00012: val_categorical_accuracy did not improve from 0.87000 21/21 [==============================] - 44s 2s/step - loss: 0.0789 - categorical_accuracy: 0.9796 - val_loss: 0.6115 - val_categorical_accuracy: 0.8000 Epoch 13/20 21/21 [==============================] - ETA: 0s - loss: 0.0671 - categorical_accuracy: 0.9796 Epoch 00013: val_categorical_accuracy did not improve from 0.87000 21/21 [==============================] - 45s 2s/step - loss: 0.0671 - categorical_accuracy: 0.9796 - val_loss: 0.5355 - val_categorical_accuracy: 0.8200 Epoch 14/20 21/21 [==============================] - ETA: 0s - loss: 0.0543 - categorical_accuracy: 0.9857 Epoch 00014: val_categorical_accuracy did not improve from 0.87000 21/21 [==============================] - 44s 2s/step - loss: 0.0543 - categorical_accuracy: 0.9857 - val_loss: 0.6499 - val_categorical_accuracy: 0.7700 Epoch 15/20 21/21 [==============================] - ETA: 0s - loss: 0.0437 - categorical_accuracy: 0.9872 Epoch 00015: val_categorical_accuracy improved from 0.87000 to 0.90000, saving model to 
Conv3D_3_4_2021-05-2706_04_33.426971/model-00015-0.04372-0.98718-0.40197-0.90000.h5 Epoch 00015: ReduceLROnPlateau reducing learning rate to 0.00020000000949949026. 21/21 [==============================] - 45s 2s/step - loss: 0.0437 - categorical_accuracy: 0.9872 - val_loss: 0.4020 - val_categorical_accuracy: 0.9000 Epoch 16/20 21/21 [==============================] - ETA: 0s - loss: 0.0401 - categorical_accuracy: 0.9894 Epoch 00016: val_categorical_accuracy did not improve from 0.90000 21/21 [==============================] - 43s 2s/step - loss: 0.0401 - categorical_accuracy: 0.9894 - val_loss: 0.2708 - val_categorical_accuracy: 0.8900 Epoch 17/20 21/21 [==============================] - ETA: 0s - loss: 0.0190 - categorical_accuracy: 0.9977 Epoch 00017: val_categorical_accuracy did not improve from 0.90000 21/21 [==============================] - 45s 2s/step - loss: 0.0190 - categorical_accuracy: 0.9977 - val_loss: 0.3039 - val_categorical_accuracy: 0.9000 Epoch 18/20 21/21 [==============================] - ETA: 0s - loss: 0.0177 - categorical_accuracy: 0.9970 Epoch 00018: val_categorical_accuracy did not improve from 0.90000 21/21 [==============================] - 46s 2s/step - loss: 0.0177 - categorical_accuracy: 0.9970 - val_loss: 0.3396 - val_categorical_accuracy: 0.8700 Epoch 19/20 21/21 [==============================] - ETA: 0s - loss: 0.0113 - categorical_accuracy: 0.9977 Epoch 00019: val_categorical_accuracy did not improve from 0.90000 21/21 [==============================] - 46s 2s/step - loss: 0.0113 - categorical_accuracy: 0.9977 - val_loss: 0.3478 - val_categorical_accuracy: 0.8900 Epoch 20/20 21/21 [==============================] - ETA: 0s - loss: 0.0155 - categorical_accuracy: 0.9970 Epoch 00020: val_categorical_accuracy improved from 0.90000 to 0.91000, saving model to Conv3D_3_4_2021-05-2706_04_33.426971/model-00020-0.01551-0.99698-0.28990-0.91000.h5 Epoch 00020: ReduceLROnPlateau reducing learning rate to 4.0000001899898055e-05. 
21/21 [==============================] - 44s 2s/step - loss: 0.0155 - categorical_accuracy: 0.9970 - val_loss: 0.2899 - val_categorical_accuracy: 0.9100
Results of above model: best validation accuracy of 91% with ~99.7% training accuracy (Adam optimizer, 100x100 frames).
# Model 7: same Conv3D (3,3,3) network at 100x100 resolution with
# augmentation and no normalization, this time trained with SGD.
frame_height = 100   # input frame height in pixels
frame_width = 100    # input frame width in pixels
num_frames = 16      # frames sampled per gesture video
size_batch = 32      # generator batch size
num_epochs = 20      # training epochs
augment = True       # enable data augmentation
normalization = False  # skip per-frame normalization

# Build the network at the reduced resolution and compile with SGD.
model = model_Conv3D_3(num_frames, frame_height, frame_width)
model.compile(
    optimizer=optimizers.SGD(),
    loss='categorical_crossentropy',
    metrics=['categorical_accuracy'],
)
print(model.summary())

# Train the model and plot the accuracy/loss curves.
model_history = trainer(model, 'Conv3D_3_5', num_epochs, size_batch,
                        num_frames, frame_height, frame_width,
                        augment, normalization)
modelplot(model_history)
Model: "sequential_8" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv3d_32 (Conv3D) (None, 16, 100, 100, 16) 1312 _________________________________________________________________ activation_54 (Activation) (None, 16, 100, 100, 16) 0 _________________________________________________________________ batch_normalization_40 (Batc (None, 16, 100, 100, 16) 64 _________________________________________________________________ max_pooling3d_32 (MaxPooling (None, 8, 50, 50, 16) 0 _________________________________________________________________ conv3d_33 (Conv3D) (None, 8, 50, 50, 32) 13856 _________________________________________________________________ activation_55 (Activation) (None, 8, 50, 50, 32) 0 _________________________________________________________________ batch_normalization_41 (Batc (None, 8, 50, 50, 32) 128 _________________________________________________________________ max_pooling3d_33 (MaxPooling (None, 4, 25, 25, 32) 0 _________________________________________________________________ conv3d_34 (Conv3D) (None, 4, 25, 25, 64) 55360 _________________________________________________________________ activation_56 (Activation) (None, 4, 25, 25, 64) 0 _________________________________________________________________ batch_normalization_42 (Batc (None, 4, 25, 25, 64) 256 _________________________________________________________________ max_pooling3d_34 (MaxPooling (None, 2, 12, 12, 64) 0 _________________________________________________________________ conv3d_35 (Conv3D) (None, 2, 12, 12, 128) 221312 _________________________________________________________________ activation_57 (Activation) (None, 2, 12, 12, 128) 0 _________________________________________________________________ batch_normalization_43 (Batc (None, 2, 12, 12, 128) 512 _________________________________________________________________ max_pooling3d_35 (MaxPooling (None, 1, 
6, 6, 128) 0 _________________________________________________________________ flatten_8 (Flatten) (None, 4608) 0 _________________________________________________________________ dense_22 (Dense) (None, 64) 294976 _________________________________________________________________ activation_58 (Activation) (None, 64) 0 _________________________________________________________________ batch_normalization_44 (Batc (None, 64) 256 _________________________________________________________________ dropout_14 (Dropout) (None, 64) 0 _________________________________________________________________ dense_23 (Dense) (None, 128) 8320 _________________________________________________________________ activation_59 (Activation) (None, 128) 0 _________________________________________________________________ dropout_15 (Dropout) (None, 128) 0 _________________________________________________________________ dense_24 (Dense) (None, 5) 645 _________________________________________________________________ activation_60 (Activation) (None, 5) 0 ================================================================= Total params: 596,997 Trainable params: 596,389 Non-trainable params: 608 _________________________________________________________________ None WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen. 
Source path = /datasets/Project_data/train ; batch size = 32 Image Indexes: [ 0 2 4 6 8 10 12 14 15 17 19 21 23 25 27 29] Epoch 1/20 21/21 [==============================] - ETA: 0s - loss: 1.6767 - categorical_accuracy: 0.3220Source path = /datasets/Project_data/val ; batch size = 32 Image Indexes: [ 0 2 4 6 8 10 12 14 15 17 19 21 23 25 27 29] Epoch 00001: val_categorical_accuracy improved from -inf to 0.23000, saving model to Conv3D_3_5_2021-05-2706_19_36.894290/model-00001-1.67671-0.32202-4.53826-0.23000.h5 21/21 [==============================] - 46s 2s/step - loss: 1.6767 - categorical_accuracy: 0.3220 - val_loss: 4.5383 - val_categorical_accuracy: 0.2300 Epoch 2/20 21/21 [==============================] - ETA: 0s - loss: 1.1779 - categorical_accuracy: 0.5158 Epoch 00002: val_categorical_accuracy improved from 0.23000 to 0.26000, saving model to Conv3D_3_5_2021-05-2706_19_36.894290/model-00002-1.17787-0.51584-2.45555-0.26000.h5 21/21 [==============================] - 45s 2s/step - loss: 1.1779 - categorical_accuracy: 0.5158 - val_loss: 2.4555 - val_categorical_accuracy: 0.2600 Epoch 3/20 21/21 [==============================] - ETA: 0s - loss: 0.8841 - categorical_accuracy: 0.6425 Epoch 00003: val_categorical_accuracy improved from 0.26000 to 0.32000, saving model to Conv3D_3_5_2021-05-2706_19_36.894290/model-00003-0.88405-0.64253-1.63749-0.32000.h5 21/21 [==============================] - 46s 2s/step - loss: 0.8841 - categorical_accuracy: 0.6425 - val_loss: 1.6375 - val_categorical_accuracy: 0.3200 Epoch 4/20 21/21 [==============================] - ETA: 0s - loss: 0.7242 - categorical_accuracy: 0.7413 Epoch 00004: val_categorical_accuracy improved from 0.32000 to 0.38000, saving model to Conv3D_3_5_2021-05-2706_19_36.894290/model-00004-0.72417-0.74133-1.50776-0.38000.h5 21/21 [==============================] - 44s 2s/step - loss: 0.7242 - categorical_accuracy: 0.7413 - val_loss: 1.5078 - val_categorical_accuracy: 0.3800 Epoch 5/20 21/21 
[==============================] - ETA: 0s - loss: 0.6319 - categorical_accuracy: 0.7828 Epoch 00005: val_categorical_accuracy improved from 0.38000 to 0.59000, saving model to Conv3D_3_5_2021-05-2706_19_36.894290/model-00005-0.63191-0.78281-1.06865-0.59000.h5 21/21 [==============================] - 46s 2s/step - loss: 0.6319 - categorical_accuracy: 0.7828 - val_loss: 1.0687 - val_categorical_accuracy: 0.5900 Epoch 6/20 21/21 [==============================] - ETA: 0s - loss: 0.5292 - categorical_accuracy: 0.8371 Epoch 00006: val_categorical_accuracy did not improve from 0.59000 21/21 [==============================] - 46s 2s/step - loss: 0.5292 - categorical_accuracy: 0.8371 - val_loss: 1.0025 - val_categorical_accuracy: 0.5700 Epoch 7/20 21/21 [==============================] - ETA: 0s - loss: 0.4393 - categorical_accuracy: 0.8741 Epoch 00007: val_categorical_accuracy improved from 0.59000 to 0.67000, saving model to Conv3D_3_5_2021-05-2706_19_36.894290/model-00007-0.43928-0.87406-0.87290-0.67000.h5 21/21 [==============================] - 46s 2s/step - loss: 0.4393 - categorical_accuracy: 0.8741 - val_loss: 0.8729 - val_categorical_accuracy: 0.6700 Epoch 8/20 21/21 [==============================] - ETA: 0s - loss: 0.3943 - categorical_accuracy: 0.8854 Epoch 00008: val_categorical_accuracy did not improve from 0.67000 21/21 [==============================] - 43s 2s/step - loss: 0.3943 - categorical_accuracy: 0.8854 - val_loss: 0.9716 - val_categorical_accuracy: 0.5800 Epoch 9/20 21/21 [==============================] - ETA: 0s - loss: 0.3174 - categorical_accuracy: 0.9223 Epoch 00009: val_categorical_accuracy improved from 0.67000 to 0.68000, saving model to Conv3D_3_5_2021-05-2706_19_36.894290/model-00009-0.31735-0.92232-0.84030-0.68000.h5 21/21 [==============================] - 46s 2s/step - loss: 0.3174 - categorical_accuracy: 0.9223 - val_loss: 0.8403 - val_categorical_accuracy: 0.6800 Epoch 10/20 21/21 [==============================] - ETA: 0s - loss: 
0.2739 - categorical_accuracy: 0.9389 Epoch 00010: val_categorical_accuracy improved from 0.68000 to 0.70000, saving model to Conv3D_3_5_2021-05-2706_19_36.894290/model-00010-0.27395-0.93891-0.80749-0.70000.h5 21/21 [==============================] - 45s 2s/step - loss: 0.2739 - categorical_accuracy: 0.9389 - val_loss: 0.8075 - val_categorical_accuracy: 0.7000 Epoch 11/20 21/21 [==============================] - ETA: 0s - loss: 0.2296 - categorical_accuracy: 0.9495 Epoch 00011: val_categorical_accuracy improved from 0.70000 to 0.79000, saving model to Conv3D_3_5_2021-05-2706_19_36.894290/model-00011-0.22957-0.94947-0.69314-0.79000.h5 21/21 [==============================] - 45s 2s/step - loss: 0.2296 - categorical_accuracy: 0.9495 - val_loss: 0.6931 - val_categorical_accuracy: 0.7900 Epoch 12/20 21/21 [==============================] - ETA: 0s - loss: 0.2137 - categorical_accuracy: 0.9532 Epoch 00012: val_categorical_accuracy improved from 0.79000 to 0.80000, saving model to Conv3D_3_5_2021-05-2706_19_36.894290/model-00012-0.21367-0.95324-0.62146-0.80000.h5 21/21 [==============================] - 44s 2s/step - loss: 0.2137 - categorical_accuracy: 0.9532 - val_loss: 0.6215 - val_categorical_accuracy: 0.8000 Epoch 13/20 21/21 [==============================] - ETA: 0s - loss: 0.1773 - categorical_accuracy: 0.9706 Epoch 00013: val_categorical_accuracy improved from 0.80000 to 0.83000, saving model to Conv3D_3_5_2021-05-2706_19_36.894290/model-00013-0.17728-0.97059-0.53621-0.83000.h5 21/21 [==============================] - 46s 2s/step - loss: 0.1773 - categorical_accuracy: 0.9706 - val_loss: 0.5362 - val_categorical_accuracy: 0.8300 Epoch 14/20 21/21 [==============================] - ETA: 0s - loss: 0.1474 - categorical_accuracy: 0.9827 Epoch 00014: val_categorical_accuracy did not improve from 0.83000 21/21 [==============================] - 46s 2s/step - loss: 0.1474 - categorical_accuracy: 0.9827 - val_loss: 0.5239 - val_categorical_accuracy: 0.8200 Epoch 15/20 
21/21 [==============================] - ETA: 0s - loss: 0.1485 - categorical_accuracy: 0.9744 Epoch 00015: val_categorical_accuracy did not improve from 0.83000 21/21 [==============================] - 45s 2s/step - loss: 0.1485 - categorical_accuracy: 0.9744 - val_loss: 0.5870 - val_categorical_accuracy: 0.7700 Epoch 16/20 21/21 [==============================] - ETA: 0s - loss: 0.1288 - categorical_accuracy: 0.9811 Epoch 00016: val_categorical_accuracy improved from 0.83000 to 0.86000, saving model to Conv3D_3_5_2021-05-2706_19_36.894290/model-00016-0.12885-0.98115-0.48172-0.86000.h5 21/21 [==============================] - 44s 2s/step - loss: 0.1288 - categorical_accuracy: 0.9811 - val_loss: 0.4817 - val_categorical_accuracy: 0.8600 Epoch 17/20 21/21 [==============================] - ETA: 0s - loss: 0.1174 - categorical_accuracy: 0.9842 Epoch 00017: val_categorical_accuracy did not improve from 0.86000 21/21 [==============================] - 46s 2s/step - loss: 0.1174 - categorical_accuracy: 0.9842 - val_loss: 0.4521 - val_categorical_accuracy: 0.8300 Epoch 18/20 21/21 [==============================] - ETA: 0s - loss: 0.1232 - categorical_accuracy: 0.9789 Epoch 00018: val_categorical_accuracy did not improve from 0.86000 21/21 [==============================] - 46s 2s/step - loss: 0.1232 - categorical_accuracy: 0.9789 - val_loss: 0.5296 - val_categorical_accuracy: 0.8200 Epoch 19/20 21/21 [==============================] - ETA: 0s - loss: 0.0856 - categorical_accuracy: 0.9910 Epoch 00019: val_categorical_accuracy did not improve from 0.86000 21/21 [==============================] - 45s 2s/step - loss: 0.0856 - categorical_accuracy: 0.9910 - val_loss: 0.4190 - val_categorical_accuracy: 0.8600 Epoch 20/20 21/21 [==============================] - ETA: 0s - loss: 0.1126 - categorical_accuracy: 0.9796 Epoch 00020: val_categorical_accuracy improved from 0.86000 to 0.89000, saving model to 
Conv3D_3_5_2021-05-2706_19_36.894290/model-00020-0.11265-0.97964-0.38642-0.89000.h5 21/21 [==============================] - 45s 2s/step - loss: 0.1126 - categorical_accuracy: 0.9796 - val_loss: 0.3864 - val_categorical_accuracy: 0.8900
Results of above model:
# Model Architecture 4 - Conv3D model.
# NOTE(review): the first two Conv3D blocks use (3,3,3) kernels, but the
# last two use (1,3,3) (spatial-only) — the original comment claimed all
# were (3,3,3). Filters 8, 16, 32, 64 are followed by Dense layers in
# descending order of 512, 64 and finally 5.
# Batch Normalization and Dropouts are also used.
def model_Conv3D_4(num_frames, frame_height, frame_width):
    """Build a 4-block Conv3D gesture classifier.

    Args:
        num_frames: number of frames sampled per video clip (temporal depth).
        frame_height: input frame height in pixels.
        frame_width: input frame width in pixels.

    Returns:
        An uncompiled Keras ``Sequential`` model ending in a 5-way softmax.
    """
    model = Sequential()
    # Block 1: 8 filters, spatio-temporal (3,3,3) kernel.
    model.add(Conv3D(8,
                     kernel_size=(3,3,3),
                     input_shape=(num_frames, frame_height, frame_width, 3),
                     padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling3D(pool_size=(2,2,2)))
    # Block 2: 16 filters, spatio-temporal (3,3,3) kernel.
    model.add(Conv3D(16,
                     kernel_size=(3,3,3),
                     padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling3D(pool_size=(2,2,2)))
    # Block 3: 32 filters, spatial-only (1,3,3) kernel (no temporal mixing).
    model.add(Conv3D(32,
                     kernel_size=(1,3,3),
                     padding='same'))
    model.add(BatchNormalization())
    model.add(Activation('relu'))
    model.add(MaxPooling3D(pool_size=(2,2,2)))
    # Block 4: 64 filters, spatial-only kernel; dropout instead of batch norm.
    model.add(Conv3D(64,
                     kernel_size=(1,3,3),
                     padding='same'))
    model.add(Activation('relu'))
    model.add(Dropout(0.25))
    model.add(MaxPooling3D(pool_size=(2,2,2)))
    # Flatten layers feeding the classifier head.
    model.add(Flatten())
    model.add(Dense(512, activation='relu'))
    model.add(Dropout(0.5))
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.25))
    # Softmax layer for the 5 gesture classes.
    model.add(Dense(5, activation='softmax'))
    return model
# Model 8 - Conv3D Model with filter size (3,3,3), WITHOUT augmentation,
# without Normalization, with Framesize 120x120 (20 frames) and Adam Optimizer.
# NOTE(review): the original comment said "with augmentation ... Framesize
# 100x100", contradicting the settings below (augment=False, 120x120);
# the comment is corrected here, the code is unchanged.
frame_height = 120
frame_width = 120
num_frames = 20
size_batch = 32
num_epochs = 30
augment = False
normalization = False
# Build and compile the model with Adam (default learning rate).
model = model_Conv3D_4(num_frames, frame_height, frame_width)
optimiser = optimizers.Adam()
model.compile(optimizer = optimiser, loss = 'categorical_crossentropy', metrics = ['categorical_accuracy'])
print(model.summary())
# Run the model and check accuracy
model_history = trainer(model, 'Conv3D_4_1', num_epochs, size_batch, num_frames, frame_height, frame_width, augment, normalization)
modelplot(model_history)
Model: "sequential_9" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv3d_36 (Conv3D) (None, 20, 120, 120, 8) 656 _________________________________________________________________ batch_normalization_45 (Batc (None, 20, 120, 120, 8) 32 _________________________________________________________________ activation_61 (Activation) (None, 20, 120, 120, 8) 0 _________________________________________________________________ max_pooling3d_36 (MaxPooling (None, 10, 60, 60, 8) 0 _________________________________________________________________ conv3d_37 (Conv3D) (None, 10, 60, 60, 16) 3472 _________________________________________________________________ batch_normalization_46 (Batc (None, 10, 60, 60, 16) 64 _________________________________________________________________ activation_62 (Activation) (None, 10, 60, 60, 16) 0 _________________________________________________________________ max_pooling3d_37 (MaxPooling (None, 5, 30, 30, 16) 0 _________________________________________________________________ conv3d_38 (Conv3D) (None, 5, 30, 30, 32) 4640 _________________________________________________________________ batch_normalization_47 (Batc (None, 5, 30, 30, 32) 128 _________________________________________________________________ activation_63 (Activation) (None, 5, 30, 30, 32) 0 _________________________________________________________________ max_pooling3d_38 (MaxPooling (None, 2, 15, 15, 32) 0 _________________________________________________________________ conv3d_39 (Conv3D) (None, 2, 15, 15, 64) 18496 _________________________________________________________________ activation_64 (Activation) (None, 2, 15, 15, 64) 0 _________________________________________________________________ dropout_16 (Dropout) (None, 2, 15, 15, 64) 0 _________________________________________________________________ max_pooling3d_39 (MaxPooling (None, 1, 7, 7, 64) 0 
_________________________________________________________________ flatten_9 (Flatten) (None, 3136) 0 _________________________________________________________________ dense_25 (Dense) (None, 512) 1606144 _________________________________________________________________ dropout_17 (Dropout) (None, 512) 0 _________________________________________________________________ dense_26 (Dense) (None, 64) 32832 _________________________________________________________________ dropout_18 (Dropout) (None, 64) 0 _________________________________________________________________ dense_27 (Dense) (None, 5) 325 ================================================================= Total params: 1,666,789 Trainable params: 1,666,677 Non-trainable params: 112 _________________________________________________________________ None WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen. Source path = /datasets/Project_data/train ; batch size = 32 Image Indexes: [ 0 2 3 5 6 8 9 11 12 14 15 17 18 20 21 23 24 26 27 29] Epoch 1/30 21/21 [==============================] - ETA: 0s - loss: 2.1211 - categorical_accuracy: 0.2368Source path = /datasets/Project_data/val ; batch size = 32 Image Indexes: [ 0 2 3 5 6 8 9 11 12 14 15 17 18 20 21 23 24 26 27 29] Epoch 00001: val_categorical_accuracy improved from -inf to 0.18000, saving model to Conv3D_4_1_2021-05-2706_34_48.855871/model-00001-2.12109-0.23680-3.15010-0.18000.h5 21/21 [==============================] - 47s 2s/step - loss: 2.1211 - categorical_accuracy: 0.2368 - val_loss: 3.1501 - val_categorical_accuracy: 0.1800 Epoch 2/30 21/21 [==============================] - ETA: 0s - loss: 1.3686 - categorical_accuracy: 0.3997 Epoch 00002: val_categorical_accuracy improved from 0.18000 to 0.24000, saving model to Conv3D_4_1_2021-05-2706_34_48.855871/model-00002-1.36858-0.39970-4.47820-0.24000.h5 21/21 [==============================] - 48s 2s/step - loss: 1.3686 - 
categorical_accuracy: 0.3997 - val_loss: 4.4782 - val_categorical_accuracy: 0.2400 Epoch 3/30 21/21 [==============================] - ETA: 0s - loss: 1.2331 - categorical_accuracy: 0.4630 Epoch 00003: val_categorical_accuracy improved from 0.24000 to 0.26000, saving model to Conv3D_4_1_2021-05-2706_34_48.855871/model-00003-1.23305-0.46305-1.81666-0.26000.h5 21/21 [==============================] - 47s 2s/step - loss: 1.2331 - categorical_accuracy: 0.4630 - val_loss: 1.8167 - val_categorical_accuracy: 0.2600 Epoch 4/30 21/21 [==============================] - ETA: 0s - loss: 1.0907 - categorical_accuracy: 0.5158 Epoch 00004: val_categorical_accuracy did not improve from 0.26000 21/21 [==============================] - 46s 2s/step - loss: 1.0907 - categorical_accuracy: 0.5158 - val_loss: 2.1494 - val_categorical_accuracy: 0.2500 Epoch 5/30 21/21 [==============================] - ETA: 0s - loss: 0.9323 - categorical_accuracy: 0.6199 Epoch 00005: val_categorical_accuracy improved from 0.26000 to 0.42000, saving model to Conv3D_4_1_2021-05-2706_34_48.855871/model-00005-0.93232-0.61991-1.19609-0.42000.h5 21/21 [==============================] - 47s 2s/step - loss: 0.9323 - categorical_accuracy: 0.6199 - val_loss: 1.1961 - val_categorical_accuracy: 0.4200 Epoch 6/30 21/21 [==============================] - ETA: 0s - loss: 0.8567 - categorical_accuracy: 0.6621 Epoch 00006: val_categorical_accuracy improved from 0.42000 to 0.51000, saving model to Conv3D_4_1_2021-05-2706_34_48.855871/model-00006-0.85665-0.66214-1.19689-0.51000.h5 21/21 [==============================] - 47s 2s/step - loss: 0.8567 - categorical_accuracy: 0.6621 - val_loss: 1.1969 - val_categorical_accuracy: 0.5100 Epoch 7/30 21/21 [==============================] - ETA: 0s - loss: 0.7846 - categorical_accuracy: 0.7134 Epoch 00007: val_categorical_accuracy did not improve from 0.51000 21/21 [==============================] - 46s 2s/step - loss: 0.7846 - categorical_accuracy: 0.7134 - val_loss: 1.1698 - 
val_categorical_accuracy: 0.4800 Epoch 8/30 21/21 [==============================] - ETA: 0s - loss: 0.6420 - categorical_accuracy: 0.7541 Epoch 00008: val_categorical_accuracy did not improve from 0.51000 21/21 [==============================] - 45s 2s/step - loss: 0.6420 - categorical_accuracy: 0.7541 - val_loss: 1.3769 - val_categorical_accuracy: 0.5100 Epoch 9/30 21/21 [==============================] - ETA: 0s - loss: 0.4316 - categorical_accuracy: 0.8326 Epoch 00009: val_categorical_accuracy did not improve from 0.51000 21/21 [==============================] - 47s 2s/step - loss: 0.4316 - categorical_accuracy: 0.8326 - val_loss: 2.4490 - val_categorical_accuracy: 0.4000 Epoch 10/30 21/21 [==============================] - ETA: 0s - loss: 0.4488 - categorical_accuracy: 0.8462 Epoch 00010: val_categorical_accuracy improved from 0.51000 to 0.81000, saving model to Conv3D_4_1_2021-05-2706_34_48.855871/model-00010-0.44880-0.84615-0.52470-0.81000.h5 21/21 [==============================] - 47s 2s/step - loss: 0.4488 - categorical_accuracy: 0.8462 - val_loss: 0.5247 - val_categorical_accuracy: 0.8100 Epoch 11/30 21/21 [==============================] - ETA: 0s - loss: 0.3414 - categorical_accuracy: 0.8763 Epoch 00011: val_categorical_accuracy did not improve from 0.81000 21/21 [==============================] - 46s 2s/step - loss: 0.3414 - categorical_accuracy: 0.8763 - val_loss: 0.5789 - val_categorical_accuracy: 0.7900 Epoch 12/30 21/21 [==============================] - ETA: 0s - loss: 0.2687 - categorical_accuracy: 0.8959 Epoch 00012: val_categorical_accuracy did not improve from 0.81000 21/21 [==============================] - 44s 2s/step - loss: 0.2687 - categorical_accuracy: 0.8959 - val_loss: 0.6114 - val_categorical_accuracy: 0.7500 Epoch 13/30 21/21 [==============================] - ETA: 0s - loss: 0.1778 - categorical_accuracy: 0.9367 Epoch 00013: val_categorical_accuracy did not improve from 0.81000 21/21 [==============================] - 46s 2s/step - 
loss: 0.1778 - categorical_accuracy: 0.9367 - val_loss: 0.6783 - val_categorical_accuracy: 0.7700 Epoch 14/30 21/21 [==============================] - ETA: 0s - loss: 0.1527 - categorical_accuracy: 0.9502 Epoch 00014: val_categorical_accuracy did not improve from 0.81000 Epoch 00014: ReduceLROnPlateau reducing learning rate to 0.00020000000949949026. 21/21 [==============================] - 47s 2s/step - loss: 0.1527 - categorical_accuracy: 0.9502 - val_loss: 0.6792 - val_categorical_accuracy: 0.7600 Epoch 15/30 21/21 [==============================] - ETA: 0s - loss: 0.1282 - categorical_accuracy: 0.9608 Epoch 00015: val_categorical_accuracy did not improve from 0.81000 21/21 [==============================] - 47s 2s/step - loss: 0.1282 - categorical_accuracy: 0.9608 - val_loss: 1.1426 - val_categorical_accuracy: 0.6900 Epoch 16/30 21/21 [==============================] - ETA: 0s - loss: 0.0976 - categorical_accuracy: 0.9698 Epoch 00016: val_categorical_accuracy improved from 0.81000 to 0.91000, saving model to Conv3D_4_1_2021-05-2706_34_48.855871/model-00016-0.09756-0.96983-0.28389-0.91000.h5 21/21 [==============================] - 45s 2s/step - loss: 0.0976 - categorical_accuracy: 0.9698 - val_loss: 0.2839 - val_categorical_accuracy: 0.9100 Epoch 17/30 21/21 [==============================] - ETA: 0s - loss: 0.1037 - categorical_accuracy: 0.9683 Epoch 00017: val_categorical_accuracy did not improve from 0.91000 21/21 [==============================] - 47s 2s/step - loss: 0.1037 - categorical_accuracy: 0.9683 - val_loss: 0.4187 - val_categorical_accuracy: 0.8600 Epoch 18/30 21/21 [==============================] - ETA: 0s - loss: 0.0806 - categorical_accuracy: 0.9713 Epoch 00018: val_categorical_accuracy did not improve from 0.91000 21/21 [==============================] - 46s 2s/step - loss: 0.0806 - categorical_accuracy: 0.9713 - val_loss: 0.2961 - val_categorical_accuracy: 0.9000 Epoch 19/30 21/21 [==============================] - ETA: 0s - loss: 0.0563 - 
categorical_accuracy: 0.9864 Epoch 00019: val_categorical_accuracy did not improve from 0.91000 21/21 [==============================] - 47s 2s/step - loss: 0.0563 - categorical_accuracy: 0.9864 - val_loss: 0.2938 - val_categorical_accuracy: 0.8900 Epoch 20/30 21/21 [==============================] - ETA: 0s - loss: 0.0551 - categorical_accuracy: 0.9864 Epoch 00020: val_categorical_accuracy improved from 0.91000 to 0.92000, saving model to Conv3D_4_1_2021-05-2706_34_48.855871/model-00020-0.05512-0.98643-0.28649-0.92000.h5 Epoch 00020: ReduceLROnPlateau reducing learning rate to 4.0000001899898055e-05. 21/21 [==============================] - 45s 2s/step - loss: 0.0551 - categorical_accuracy: 0.9864 - val_loss: 0.2865 - val_categorical_accuracy: 0.9200 Epoch 21/30 21/21 [==============================] - ETA: 0s - loss: 0.0528 - categorical_accuracy: 0.9849 Epoch 00021: val_categorical_accuracy did not improve from 0.92000 21/21 [==============================] - 47s 2s/step - loss: 0.0528 - categorical_accuracy: 0.9849 - val_loss: 0.2808 - val_categorical_accuracy: 0.8800 Epoch 22/30 21/21 [==============================] - ETA: 0s - loss: 0.0477 - categorical_accuracy: 0.9894 Epoch 00022: val_categorical_accuracy did not improve from 0.92000 21/21 [==============================] - 47s 2s/step - loss: 0.0477 - categorical_accuracy: 0.9894 - val_loss: 0.2301 - val_categorical_accuracy: 0.9200 Epoch 23/30 21/21 [==============================] - ETA: 0s - loss: 0.0489 - categorical_accuracy: 0.9849 Epoch 00023: val_categorical_accuracy did not improve from 0.92000 21/21 [==============================] - 46s 2s/step - loss: 0.0489 - categorical_accuracy: 0.9849 - val_loss: 0.2086 - val_categorical_accuracy: 0.9000 Epoch 24/30 21/21 [==============================] - ETA: 0s - loss: 0.0565 - categorical_accuracy: 0.9789 Epoch 00024: val_categorical_accuracy did not improve from 0.92000 21/21 [==============================] - 44s 2s/step - loss: 0.0565 - 
categorical_accuracy: 0.9789 - val_loss: 0.2634 - val_categorical_accuracy: 0.9000 Epoch 25/30 21/21 [==============================] - ETA: 0s - loss: 0.0489 - categorical_accuracy: 0.9849 Epoch 00025: val_categorical_accuracy did not improve from 0.92000 21/21 [==============================] - 46s 2s/step - loss: 0.0489 - categorical_accuracy: 0.9849 - val_loss: 0.2675 - val_categorical_accuracy: 0.8900 Epoch 26/30 21/21 [==============================] - ETA: 0s - loss: 0.0479 - categorical_accuracy: 0.9864 Epoch 00026: val_categorical_accuracy did not improve from 0.92000 21/21 [==============================] - 47s 2s/step - loss: 0.0479 - categorical_accuracy: 0.9864 - val_loss: 0.3459 - val_categorical_accuracy: 0.8800 Epoch 27/30 21/21 [==============================] - ETA: 0s - loss: 0.0505 - categorical_accuracy: 0.9864 Epoch 00027: val_categorical_accuracy improved from 0.92000 to 0.94000, saving model to Conv3D_4_1_2021-05-2706_34_48.855871/model-00027-0.05054-0.98643-0.17316-0.94000.h5 21/21 [==============================] - 47s 2s/step - loss: 0.0505 - categorical_accuracy: 0.9864 - val_loss: 0.1732 - val_categorical_accuracy: 0.9400 Epoch 28/30 21/21 [==============================] - ETA: 0s - loss: 0.0422 - categorical_accuracy: 0.9879 Epoch 00028: val_categorical_accuracy did not improve from 0.94000 21/21 [==============================] - 45s 2s/step - loss: 0.0422 - categorical_accuracy: 0.9879 - val_loss: 0.2745 - val_categorical_accuracy: 0.8900 Epoch 29/30 21/21 [==============================] - ETA: 0s - loss: 0.0515 - categorical_accuracy: 0.9849 Epoch 00029: val_categorical_accuracy did not improve from 0.94000 21/21 [==============================] - 46s 2s/step - loss: 0.0515 - categorical_accuracy: 0.9849 - val_loss: 0.2555 - val_categorical_accuracy: 0.8900 Epoch 30/30 21/21 [==============================] - ETA: 0s - loss: 0.0604 - categorical_accuracy: 0.9774 Epoch 00030: val_categorical_accuracy did not improve from 0.94000 
21/21 [==============================] - 47s 2s/step - loss: 0.0604 - categorical_accuracy: 0.9774 - val_loss: 0.2992 - val_categorical_accuracy: 0.8800
# Conv3D Model with filter size (3,3,3) and LeakyReLU activations.
def model_Conv3D_5(num_frames, frame_height, frame_width):
    """Build a 3-conv Conv3D gesture classifier with LeakyReLU activations.

    Args:
        num_frames: number of frames sampled per video clip.
        frame_height: input frame height in pixels.
        frame_width: input frame width in pixels.

    Returns:
        An uncompiled Keras ``Sequential`` model ending in a 5-way softmax.
    """
    model = Sequential()
    model.add(Conv3D(32, (3,3,3), strides=(1,1,1),
                     input_shape = (num_frames, frame_height, frame_width, 3),
                     padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.1))
    # Pools depth and height only; width is left untouched (pool depth 1).
    model.add(MaxPooling3D(pool_size=(2,2,1), strides=(2,2,1)))
    model.add(Conv3D(64, (3,3,3), padding='same'))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.1))
    # 'valid' padding here, so spatial/temporal dims shrink by 2.
    model.add(Conv3D(64, (3,3,3)))
    model.add(BatchNormalization())
    model.add(LeakyReLU(alpha=0.1))
    model.add(MaxPooling3D(pool_size=(2,2,2)))
    # NOTE(review): original comments labelled these "Dropout 0.5" /
    # "Dropout 0.25" although both rates are 0.25; labels corrected.
    model.add(Dropout(0.25))
    model.add(Flatten())
    model.add(Dropout(0.25))
    model.add(Dense(512))
    model.add(LeakyReLU(alpha=0.1))
    # Final softmax layer for the 5 classes.
    model.add(Dense(5))
    model.add(Activation('softmax'))
    return model
# Model 9: Conv3D (3x3x3 kernels), 100x100 frames, 15 frames per clip,
# augmentation and normalization both enabled, Adam optimizer.

# Experiment hyper-parameters.
frame_height, frame_width = 100, 100
num_frames = 15
size_batch = 32
num_epochs = 30
augment, normalize = True, True

# Build the network and compile it with Adam.
model = model_Conv3D_5(num_frames, frame_height, frame_width)
optimiser = optimizers.Adam()
model.compile(optimizer=optimiser,
              loss='categorical_crossentropy',
              metrics=['categorical_accuracy'])
print(model.summary())

# Train the model and plot the accuracy/loss curves.
model_history = trainer(model, 'Conv3D_5_1', num_epochs, size_batch,
                        num_frames, frame_height, frame_width,
                        augment, normalize)
modelplot(model_history)
Model: "sequential_10" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv3d_40 (Conv3D) (None, 15, 100, 100, 32) 2624 _________________________________________________________________ batch_normalization_48 (Batc (None, 15, 100, 100, 32) 128 _________________________________________________________________ leaky_re_lu (LeakyReLU) (None, 15, 100, 100, 32) 0 _________________________________________________________________ max_pooling3d_40 (MaxPooling (None, 7, 50, 100, 32) 0 _________________________________________________________________ conv3d_41 (Conv3D) (None, 7, 50, 100, 64) 55360 _________________________________________________________________ batch_normalization_49 (Batc (None, 7, 50, 100, 64) 256 _________________________________________________________________ leaky_re_lu_1 (LeakyReLU) (None, 7, 50, 100, 64) 0 _________________________________________________________________ conv3d_42 (Conv3D) (None, 5, 48, 98, 64) 110656 _________________________________________________________________ batch_normalization_50 (Batc (None, 5, 48, 98, 64) 256 _________________________________________________________________ leaky_re_lu_2 (LeakyReLU) (None, 5, 48, 98, 64) 0 _________________________________________________________________ max_pooling3d_41 (MaxPooling (None, 2, 24, 49, 64) 0 _________________________________________________________________ dropout_19 (Dropout) (None, 2, 24, 49, 64) 0 _________________________________________________________________ flatten_10 (Flatten) (None, 150528) 0 _________________________________________________________________ dropout_20 (Dropout) (None, 150528) 0 _________________________________________________________________ dense_28 (Dense) (None, 512) 77070848 _________________________________________________________________ leaky_re_lu_3 (LeakyReLU) (None, 512) 0 
_________________________________________________________________ dense_29 (Dense) (None, 5) 2565 _________________________________________________________________ activation_65 (Activation) (None, 5) 0 ================================================================= Total params: 77,242,693 Trainable params: 77,242,373 Non-trainable params: 320 _________________________________________________________________ None WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen. Source path = /datasets/Project_data/train ; batch size = 32 Image Indexes: [ 0 2 4 6 8 10 12 15 17 19 21 23 25 27 29] Epoch 1/30 2/21 [=>............................] - ETA: 6s - loss: 156.1209 - categorical_accuracy: 0.2422WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.2594s vs `on_train_batch_end` time: 0.3966s). Check your callbacks. 21/21 [==============================] - ETA: 0s - loss: 79.3026 - categorical_accuracy: 0.3228Source path = /datasets/Project_data/val ; batch size = 32 Image Indexes: [ 0 2 4 6 8 10 12 15 17 19 21 23 25 27 29] Epoch 00001: val_categorical_accuracy improved from -inf to 0.23000, saving model to Conv3D_5_1_2021-05-2706_58_05.690846/model-00001-79.30255-0.32278-313.75848-0.23000.h5 21/21 [==============================] - 47s 2s/step - loss: 79.3026 - categorical_accuracy: 0.3228 - val_loss: 313.7585 - val_categorical_accuracy: 0.2300 Epoch 2/30 21/21 [==============================] - ETA: 0s - loss: 10.1303 - categorical_accuracy: 0.4879 Epoch 00002: val_categorical_accuracy improved from 0.23000 to 0.35000, saving model to Conv3D_5_1_2021-05-2706_58_05.690846/model-00002-10.13029-0.48793-15.63692-0.35000.h5 21/21 [==============================] - 45s 2s/step - loss: 10.1303 - categorical_accuracy: 0.4879 - val_loss: 15.6369 - val_categorical_accuracy: 0.3500 Epoch 3/30 21/21 [==============================] - ETA: 0s - loss: 3.8225 - 
categorical_accuracy: 0.6229 Epoch 00003: val_categorical_accuracy did not improve from 0.35000 21/21 [==============================] - 44s 2s/step - loss: 3.8225 - categorical_accuracy: 0.6229 - val_loss: 4.0688 - val_categorical_accuracy: 0.3500 Epoch 4/30 21/21 [==============================] - ETA: 0s - loss: 1.4344 - categorical_accuracy: 0.7632 Epoch 00004: val_categorical_accuracy did not improve from 0.35000 21/21 [==============================] - 42s 2s/step - loss: 1.4344 - categorical_accuracy: 0.7632 - val_loss: 6.2527 - val_categorical_accuracy: 0.3300 Epoch 5/30 21/21 [==============================] - ETA: 0s - loss: 0.8847 - categorical_accuracy: 0.8273 Epoch 00005: val_categorical_accuracy did not improve from 0.35000 21/21 [==============================] - 44s 2s/step - loss: 0.8847 - categorical_accuracy: 0.8273 - val_loss: 7.4398 - val_categorical_accuracy: 0.2700 Epoch 6/30 21/21 [==============================] - ETA: 0s - loss: 0.6066 - categorical_accuracy: 0.8605 Epoch 00006: val_categorical_accuracy did not improve from 0.35000 21/21 [==============================] - 44s 2s/step - loss: 0.6066 - categorical_accuracy: 0.8605 - val_loss: 9.0164 - val_categorical_accuracy: 0.2700 Epoch 7/30 21/21 [==============================] - ETA: 0s - loss: 0.4614 - categorical_accuracy: 0.8937 Epoch 00007: val_categorical_accuracy improved from 0.35000 to 0.44000, saving model to Conv3D_5_1_2021-05-2706_58_05.690846/model-00007-0.46137-0.89367-6.27081-0.44000.h5 Epoch 00007: ReduceLROnPlateau reducing learning rate to 0.00020000000949949026. 
21/21 [==============================] - 46s 2s/step - loss: 0.4614 - categorical_accuracy: 0.8937 - val_loss: 6.2708 - val_categorical_accuracy: 0.4400 Epoch 8/30 21/21 [==============================] - ETA: 0s - loss: 0.1772 - categorical_accuracy: 0.9532 Epoch 00008: val_categorical_accuracy did not improve from 0.44000 21/21 [==============================] - 43s 2s/step - loss: 0.1772 - categorical_accuracy: 0.9532 - val_loss: 7.5748 - val_categorical_accuracy: 0.3300 Epoch 9/30 21/21 [==============================] - ETA: 0s - loss: 0.0675 - categorical_accuracy: 0.9774 Epoch 00009: val_categorical_accuracy did not improve from 0.44000 21/21 [==============================] - 45s 2s/step - loss: 0.0675 - categorical_accuracy: 0.9774 - val_loss: 7.4971 - val_categorical_accuracy: 0.3600 Epoch 10/30 21/21 [==============================] - ETA: 0s - loss: 0.0314 - categorical_accuracy: 0.9910 Epoch 00010: val_categorical_accuracy did not improve from 0.44000 21/21 [==============================] - 45s 2s/step - loss: 0.0314 - categorical_accuracy: 0.9910 - val_loss: 7.9183 - val_categorical_accuracy: 0.3100 Epoch 11/30 21/21 [==============================] - ETA: 0s - loss: 0.0280 - categorical_accuracy: 0.9910 Epoch 00011: val_categorical_accuracy did not improve from 0.44000 Epoch 00011: ReduceLROnPlateau reducing learning rate to 4.0000001899898055e-05. 
21/21 [==============================] - 45s 2s/step - loss: 0.0280 - categorical_accuracy: 0.9910 - val_loss: 7.5536 - val_categorical_accuracy: 0.3800 Epoch 12/30 21/21 [==============================] - ETA: 0s - loss: 0.0199 - categorical_accuracy: 0.9940 Epoch 00012: val_categorical_accuracy did not improve from 0.44000 21/21 [==============================] - 44s 2s/step - loss: 0.0199 - categorical_accuracy: 0.9940 - val_loss: 6.5849 - val_categorical_accuracy: 0.3900 Epoch 13/30 21/21 [==============================] - ETA: 0s - loss: 0.0223 - categorical_accuracy: 0.9932 Epoch 00013: val_categorical_accuracy did not improve from 0.44000 21/21 [==============================] - 45s 2s/step - loss: 0.0223 - categorical_accuracy: 0.9932 - val_loss: 6.1129 - val_categorical_accuracy: 0.4400 Epoch 00013: early stopping
# Model 10 - Conv3D Model with filter size (3,3,3)
# Same as above model without augmentation
# Optimizer - SGD

# Input-clip geometry: 15 frames per video, resized to 100x100 RGB.
frame_height = 100
frame_width = 100
num_frames = 15
size_batch = 20
num_epochs = 30
augment = False      # no augmentation for this run
normalize = True     # per-frame normalization kept on

model = model_Conv3D_5(num_frames, frame_height, frame_width)
optimiser = optimizers.SGD()
model.compile(optimizer = optimiser, loss = 'categorical_crossentropy', metrics = ['categorical_accuracy'])
print(model.summary())

# Run the model and check accuracy.
# BUG FIX: this previously assigned to `pmodel_history` (typo) while the
# next line plotted `model_history`, so modelplot() rendered the history of
# the PREVIOUS experiment's run instead of this one.
model_history = trainer(model, 'Conv3D_5_2', num_epochs, size_batch, num_frames, frame_height, frame_width, augment, normalize)
modelplot(model_history)
Model: "sequential_11" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv3d_43 (Conv3D) (None, 15, 100, 100, 32) 2624 _________________________________________________________________ batch_normalization_51 (Batc (None, 15, 100, 100, 32) 128 _________________________________________________________________ leaky_re_lu_4 (LeakyReLU) (None, 15, 100, 100, 32) 0 _________________________________________________________________ max_pooling3d_42 (MaxPooling (None, 7, 50, 100, 32) 0 _________________________________________________________________ conv3d_44 (Conv3D) (None, 7, 50, 100, 64) 55360 _________________________________________________________________ batch_normalization_52 (Batc (None, 7, 50, 100, 64) 256 _________________________________________________________________ leaky_re_lu_5 (LeakyReLU) (None, 7, 50, 100, 64) 0 _________________________________________________________________ conv3d_45 (Conv3D) (None, 5, 48, 98, 64) 110656 _________________________________________________________________ batch_normalization_53 (Batc (None, 5, 48, 98, 64) 256 _________________________________________________________________ leaky_re_lu_6 (LeakyReLU) (None, 5, 48, 98, 64) 0 _________________________________________________________________ max_pooling3d_43 (MaxPooling (None, 2, 24, 49, 64) 0 _________________________________________________________________ dropout_21 (Dropout) (None, 2, 24, 49, 64) 0 _________________________________________________________________ flatten_11 (Flatten) (None, 150528) 0 _________________________________________________________________ dropout_22 (Dropout) (None, 150528) 0 _________________________________________________________________ dense_30 (Dense) (None, 512) 77070848 _________________________________________________________________ leaky_re_lu_7 (LeakyReLU) (None, 512) 0 
_________________________________________________________________ dense_31 (Dense) (None, 5) 2565 _________________________________________________________________ activation_66 (Activation) (None, 5) 0 ================================================================= Total params: 77,242,693 Trainable params: 77,242,373 Non-trainable params: 320 _________________________________________________________________ None WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen. Source path = /datasets/Project_data/train ; batch size = 20 Image Indexes: [ 0 2 4 6 8 10 12 15 17 19 21 23 25 27 29] Epoch 1/30 2/34 [>.............................] - ETA: 3s - loss: 48.7486 - categorical_accuracy: 0.1750WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.0708s vs `on_train_batch_end` time: 0.1202s). Check your callbacks. 34/34 [==============================] - ETA: 0s - loss: 12.0368 - categorical_accuracy: 0.2986Source path = /datasets/Project_data/val ; batch size = 20 Image Indexes: [ 0 2 4 6 8 10 12 15 17 19 21 23 25 27 29] Epoch 00001: val_categorical_accuracy improved from -inf to 0.23000, saving model to Conv3D_5_2_2021-05-2707_07_59.584754/model-00001-12.03685-0.29864-11.57296-0.23000.h5 34/34 [==============================] - 38s 1s/step - loss: 12.0368 - categorical_accuracy: 0.2986 - val_loss: 11.5730 - val_categorical_accuracy: 0.2300 Epoch 2/30 34/34 [==============================] - ETA: 0s - loss: 1.2669 - categorical_accuracy: 0.4992 Epoch 00002: val_categorical_accuracy did not improve from 0.23000 34/34 [==============================] - 37s 1s/step - loss: 1.2669 - categorical_accuracy: 0.4992 - val_loss: 3.4800 - val_categorical_accuracy: 0.2000 Epoch 3/30 33/34 [============================>.] 
- ETA: 0s - loss: 0.9723 - categorical_accuracy: 0.6212 Epoch 00003: val_categorical_accuracy improved from 0.23000 to 0.43000, saving model to Conv3D_5_2_2021-05-2707_07_59.584754/model-00003-0.97579-0.62142-1.52806-0.43000.h5 34/34 [==============================] - 39s 1s/step - loss: 0.9758 - categorical_accuracy: 0.6214 - val_loss: 1.5281 - val_categorical_accuracy: 0.4300 Epoch 4/30 33/34 [============================>.] - ETA: 0s - loss: 0.7585 - categorical_accuracy: 0.7061 Epoch 00004: val_categorical_accuracy did not improve from 0.43000 34/34 [==============================] - 37s 1s/step - loss: 0.7614 - categorical_accuracy: 0.7044 - val_loss: 3.2656 - val_categorical_accuracy: 0.2200 Epoch 5/30 33/34 [============================>.] - ETA: 0s - loss: 0.6705 - categorical_accuracy: 0.7833 Epoch 00005: val_categorical_accuracy did not improve from 0.43000 34/34 [==============================] - 37s 1s/step - loss: 0.6716 - categorical_accuracy: 0.7828 - val_loss: 1.7326 - val_categorical_accuracy: 0.3700 Epoch 6/30 33/34 [============================>.] - ETA: 0s - loss: 0.4768 - categorical_accuracy: 0.8333 Epoch 00006: val_categorical_accuracy improved from 0.43000 to 0.48000, saving model to Conv3D_5_2_2021-05-2707_07_59.584754/model-00006-0.47892-0.83258-1.59122-0.48000.h5 34/34 [==============================] - 38s 1s/step - loss: 0.4789 - categorical_accuracy: 0.8326 - val_loss: 1.5912 - val_categorical_accuracy: 0.4800 Epoch 7/30 33/34 [============================>.] - ETA: 0s - loss: 0.2465 - categorical_accuracy: 0.9212 Epoch 00007: val_categorical_accuracy did not improve from 0.48000 Epoch 00007: ReduceLROnPlateau reducing learning rate to 0.0019999999552965165. 34/34 [==============================] - 38s 1s/step - loss: 0.2482 - categorical_accuracy: 0.9201 - val_loss: 1.6359 - val_categorical_accuracy: 0.4500 Epoch 8/30 33/34 [============================>.] 
- ETA: 0s - loss: 0.2143 - categorical_accuracy: 0.9485 Epoch 00008: val_categorical_accuracy did not improve from 0.48000 34/34 [==============================] - 38s 1s/step - loss: 0.2145 - categorical_accuracy: 0.9487 - val_loss: 1.7179 - val_categorical_accuracy: 0.4400 Epoch 9/30 33/34 [============================>.] - ETA: 0s - loss: 0.1254 - categorical_accuracy: 0.9833 Epoch 00009: val_categorical_accuracy improved from 0.48000 to 0.54000, saving model to Conv3D_5_2_2021-05-2707_07_59.584754/model-00009-0.12518-0.98341-1.60063-0.54000.h5 34/34 [==============================] - 38s 1s/step - loss: 0.1252 - categorical_accuracy: 0.9834 - val_loss: 1.6006 - val_categorical_accuracy: 0.5400 Epoch 10/30 33/34 [============================>.] - ETA: 0s - loss: 0.1056 - categorical_accuracy: 0.9909 Epoch 00010: val_categorical_accuracy did not improve from 0.54000 34/34 [==============================] - 37s 1s/step - loss: 0.1053 - categorical_accuracy: 0.9910 - val_loss: 1.5140 - val_categorical_accuracy: 0.4900 Epoch 11/30 33/34 [============================>.] - ETA: 0s - loss: 0.0904 - categorical_accuracy: 0.9879 Epoch 00011: val_categorical_accuracy improved from 0.54000 to 0.56000, saving model to Conv3D_5_2_2021-05-2707_07_59.584754/model-00011-0.09004-0.98793-1.25618-0.56000.h5 34/34 [==============================] - 38s 1s/step - loss: 0.0900 - categorical_accuracy: 0.9879 - val_loss: 1.2562 - val_categorical_accuracy: 0.5600 Epoch 12/30 33/34 [============================>.] - ETA: 0s - loss: 0.0807 - categorical_accuracy: 0.9955 Epoch 00012: val_categorical_accuracy did not improve from 0.56000 34/34 [==============================] - 39s 1s/step - loss: 0.0812 - categorical_accuracy: 0.9955 - val_loss: 1.2550 - val_categorical_accuracy: 0.5500 Epoch 13/30 33/34 [============================>.] 
- ETA: 0s - loss: 0.0667 - categorical_accuracy: 0.9970 Epoch 00013: val_categorical_accuracy improved from 0.56000 to 0.62000, saving model to Conv3D_5_2_2021-05-2707_07_59.584754/model-00013-0.06656-0.99698-1.07824-0.62000.h5 34/34 [==============================] - 39s 1s/step - loss: 0.0666 - categorical_accuracy: 0.9970 - val_loss: 1.0782 - val_categorical_accuracy: 0.6200 Epoch 14/30 33/34 [============================>.] - ETA: 0s - loss: 0.0620 - categorical_accuracy: 0.9955 Epoch 00014: val_categorical_accuracy improved from 0.62000 to 0.68000, saving model to Conv3D_5_2_2021-05-2707_07_59.584754/model-00014-0.06218-0.99548-0.87290-0.68000.h5 34/34 [==============================] - 39s 1s/step - loss: 0.0622 - categorical_accuracy: 0.9955 - val_loss: 0.8729 - val_categorical_accuracy: 0.6800 Epoch 15/30 33/34 [============================>.] - ETA: 0s - loss: 0.0634 - categorical_accuracy: 0.9985 Epoch 00015: val_categorical_accuracy did not improve from 0.68000 34/34 [==============================] - 38s 1s/step - loss: 0.0632 - categorical_accuracy: 0.9985 - val_loss: 0.8723 - val_categorical_accuracy: 0.6700 Epoch 16/30 33/34 [============================>.] - ETA: 0s - loss: 0.0563 - categorical_accuracy: 1.0000 Epoch 00016: val_categorical_accuracy improved from 0.68000 to 0.70000, saving model to Conv3D_5_2_2021-05-2707_07_59.584754/model-00016-0.05686-1.00000-0.92047-0.70000.h5 34/34 [==============================] - 39s 1s/step - loss: 0.0569 - categorical_accuracy: 1.0000 - val_loss: 0.9205 - val_categorical_accuracy: 0.7000 Epoch 17/30 33/34 [============================>.] 
- ETA: 0s - loss: 0.0504 - categorical_accuracy: 1.0000 Epoch 00017: val_categorical_accuracy improved from 0.70000 to 0.75000, saving model to Conv3D_5_2_2021-05-2707_07_59.584754/model-00017-0.05079-1.00000-0.87281-0.75000.h5 34/34 [==============================] - 39s 1s/step - loss: 0.0508 - categorical_accuracy: 1.0000 - val_loss: 0.8728 - val_categorical_accuracy: 0.7500 Epoch 18/30 33/34 [============================>.] - ETA: 0s - loss: 0.0474 - categorical_accuracy: 1.0000 Epoch 00018: val_categorical_accuracy did not improve from 0.75000 34/34 [==============================] - 38s 1s/step - loss: 0.0473 - categorical_accuracy: 1.0000 - val_loss: 0.7247 - val_categorical_accuracy: 0.7200 Epoch 19/30 33/34 [============================>.] - ETA: 0s - loss: 0.0425 - categorical_accuracy: 1.0000 Epoch 00019: val_categorical_accuracy did not improve from 0.75000 34/34 [==============================] - 38s 1s/step - loss: 0.0425 - categorical_accuracy: 1.0000 - val_loss: 0.7287 - val_categorical_accuracy: 0.7100 Epoch 20/30 33/34 [============================>.] - ETA: 0s - loss: 0.0431 - categorical_accuracy: 0.9985 Epoch 00020: val_categorical_accuracy improved from 0.75000 to 0.78000, saving model to Conv3D_5_2_2021-05-2707_07_59.584754/model-00020-0.04298-0.99849-0.77329-0.78000.h5 34/34 [==============================] - 39s 1s/step - loss: 0.0430 - categorical_accuracy: 0.9985 - val_loss: 0.7733 - val_categorical_accuracy: 0.7800 Epoch 21/30 33/34 [============================>.] - ETA: 0s - loss: 0.0379 - categorical_accuracy: 1.0000 Epoch 00021: val_categorical_accuracy did not improve from 0.78000 34/34 [==============================] - 38s 1s/step - loss: 0.0379 - categorical_accuracy: 1.0000 - val_loss: 0.7581 - val_categorical_accuracy: 0.7500 Epoch 22/30 33/34 [============================>.] 
- ETA: 0s - loss: 0.0359 - categorical_accuracy: 0.9970 Epoch 00022: val_categorical_accuracy did not improve from 0.78000 Epoch 00022: ReduceLROnPlateau reducing learning rate to 0.0003999999724328518. 34/34 [==============================] - 38s 1s/step - loss: 0.0357 - categorical_accuracy: 0.9970 - val_loss: 0.8165 - val_categorical_accuracy: 0.7800 Epoch 23/30 33/34 [============================>.] - ETA: 0s - loss: 0.0346 - categorical_accuracy: 1.0000 Epoch 00023: val_categorical_accuracy did not improve from 0.78000 34/34 [==============================] - 38s 1s/step - loss: 0.0345 - categorical_accuracy: 1.0000 - val_loss: 0.8147 - val_categorical_accuracy: 0.7400 Epoch 24/30 33/34 [============================>.] - ETA: 0s - loss: 0.0295 - categorical_accuracy: 1.0000 Epoch 00024: val_categorical_accuracy did not improve from 0.78000 34/34 [==============================] - 38s 1s/step - loss: 0.0295 - categorical_accuracy: 1.0000 - val_loss: 0.7578 - val_categorical_accuracy: 0.7200 Epoch 25/30 34/34 [==============================] - ETA: 0s - loss: 0.0309 - categorical_accuracy: 1.0000 Epoch 00025: val_categorical_accuracy did not improve from 0.78000 34/34 [==============================] - 38s 1s/step - loss: 0.0309 - categorical_accuracy: 1.0000 - val_loss: 0.7737 - val_categorical_accuracy: 0.7700 Epoch 26/30 34/34 [==============================] - ETA: 0s - loss: 0.0311 - categorical_accuracy: 0.9985 Epoch 00026: val_categorical_accuracy did not improve from 0.78000 Epoch 00026: ReduceLROnPlateau reducing learning rate to 7.999999215826393e-05. 
34/34 [==============================] - 38s 1s/step - loss: 0.0311 - categorical_accuracy: 0.9985 - val_loss: 0.7635 - val_categorical_accuracy: 0.7500 Epoch 27/30 34/34 [==============================] - ETA: 0s - loss: 0.0298 - categorical_accuracy: 1.0000 Epoch 00027: val_categorical_accuracy did not improve from 0.78000 34/34 [==============================] - 38s 1s/step - loss: 0.0298 - categorical_accuracy: 1.0000 - val_loss: 0.8104 - val_categorical_accuracy: 0.7500 Epoch 28/30 34/34 [==============================] - ETA: 0s - loss: 0.0289 - categorical_accuracy: 1.0000 Epoch 00028: val_categorical_accuracy did not improve from 0.78000 34/34 [==============================] - 38s 1s/step - loss: 0.0289 - categorical_accuracy: 1.0000 - val_loss: 0.9135 - val_categorical_accuracy: 0.7200 Epoch 00028: early stopping
# Model 11 - Conv3D Model with filter size (3,3,3)
# Identical architecture to the run above, trained with plain SGD,
# this time with BOTH augmentation and normalization switched off.

# Experiment configuration (raw 100x100 RGB clips, 15 frames each).
num_frames = 15
frame_height = 100
frame_width = 100
size_batch = 20
num_epochs = 30
augment = False
normalize = False

# Build and compile the network.
model = model_Conv3D_5(num_frames, frame_height, frame_width)
optimiser = optimizers.SGD()
model.compile(optimizer=optimiser, loss='categorical_crossentropy', metrics=['categorical_accuracy'])
print(model.summary())

# Train, then visualise the accuracy/loss curves.
model_history = trainer(model, 'Conv3D_5_2_SGD', num_epochs, size_batch, num_frames, frame_height, frame_width, augment, normalize)
modelplot(model_history)
Model: "sequential_12" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= conv3d_46 (Conv3D) (None, 15, 100, 100, 32) 2624 _________________________________________________________________ batch_normalization_54 (Batc (None, 15, 100, 100, 32) 128 _________________________________________________________________ leaky_re_lu_8 (LeakyReLU) (None, 15, 100, 100, 32) 0 _________________________________________________________________ max_pooling3d_44 (MaxPooling (None, 7, 50, 100, 32) 0 _________________________________________________________________ conv3d_47 (Conv3D) (None, 7, 50, 100, 64) 55360 _________________________________________________________________ batch_normalization_55 (Batc (None, 7, 50, 100, 64) 256 _________________________________________________________________ leaky_re_lu_9 (LeakyReLU) (None, 7, 50, 100, 64) 0 _________________________________________________________________ conv3d_48 (Conv3D) (None, 5, 48, 98, 64) 110656 _________________________________________________________________ batch_normalization_56 (Batc (None, 5, 48, 98, 64) 256 _________________________________________________________________ leaky_re_lu_10 (LeakyReLU) (None, 5, 48, 98, 64) 0 _________________________________________________________________ max_pooling3d_45 (MaxPooling (None, 2, 24, 49, 64) 0 _________________________________________________________________ dropout_23 (Dropout) (None, 2, 24, 49, 64) 0 _________________________________________________________________ flatten_12 (Flatten) (None, 150528) 0 _________________________________________________________________ dropout_24 (Dropout) (None, 150528) 0 _________________________________________________________________ dense_32 (Dense) (None, 512) 77070848 _________________________________________________________________ leaky_re_lu_11 (LeakyReLU) (None, 512) 0 
_________________________________________________________________ dense_33 (Dense) (None, 5) 2565 _________________________________________________________________ activation_67 (Activation) (None, 5) 0 ================================================================= Total params: 77,242,693 Trainable params: 77,242,373 Non-trainable params: 320 _________________________________________________________________ None WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen. Source path = /datasets/Project_data/train ; batch size = 20 Image Indexes: [ 0 2 4 6 8 10 12 15 17 19 21 23 25 27 29] Epoch 1/30 33/34 [============================>.] - ETA: 0s - loss: 16.1218 - categorical_accuracy: 0.3333Source path = /datasets/Project_data/val ; batch size = 20 Image Indexes: [ 0 2 4 6 8 10 12 15 17 19 21 23 25 27 29] Epoch 00001: val_categorical_accuracy improved from -inf to 0.23000, saving model to Conv3D_5_2_SGD_2021-05-2707_25_58.766835/model-00001-16.05261-0.33635-143.06732-0.23000.h5 34/34 [==============================] - 38s 1s/step - loss: 16.0526 - categorical_accuracy: 0.3363 - val_loss: 143.0673 - val_categorical_accuracy: 0.2300 Epoch 2/30 34/34 [==============================] - ETA: 0s - loss: 1.0492 - categorical_accuracy: 0.5671 Epoch 00002: val_categorical_accuracy did not improve from 0.23000 34/34 [==============================] - 37s 1s/step - loss: 1.0492 - categorical_accuracy: 0.5671 - val_loss: 46.8084 - val_categorical_accuracy: 0.1800 Epoch 3/30 33/34 [============================>.] 
- ETA: 0s - loss: 0.9297 - categorical_accuracy: 0.6439 Epoch 00003: val_categorical_accuracy improved from 0.23000 to 0.26000, saving model to Conv3D_5_2_SGD_2021-05-2707_25_58.766835/model-00003-0.93047-0.64103-13.22679-0.26000.h5 34/34 [==============================] - 38s 1s/step - loss: 0.9305 - categorical_accuracy: 0.6410 - val_loss: 13.2268 - val_categorical_accuracy: 0.2600 Epoch 4/30 33/34 [============================>.] - ETA: 0s - loss: 0.6283 - categorical_accuracy: 0.7500 Epoch 00004: val_categorical_accuracy improved from 0.26000 to 0.39000, saving model to Conv3D_5_2_SGD_2021-05-2707_25_58.766835/model-00004-0.62801-0.74962-3.51257-0.39000.h5 34/34 [==============================] - 38s 1s/step - loss: 0.6280 - categorical_accuracy: 0.7496 - val_loss: 3.5126 - val_categorical_accuracy: 0.3900 Epoch 5/30 33/34 [============================>.] - ETA: 0s - loss: 0.5516 - categorical_accuracy: 0.7848 Epoch 00005: val_categorical_accuracy improved from 0.39000 to 0.47000, saving model to Conv3D_5_2_SGD_2021-05-2707_25_58.766835/model-00005-0.55057-0.78582-3.07600-0.47000.h5 34/34 [==============================] - 38s 1s/step - loss: 0.5506 - categorical_accuracy: 0.7858 - val_loss: 3.0760 - val_categorical_accuracy: 0.4700 Epoch 6/30 33/34 [============================>.] - ETA: 0s - loss: 0.4326 - categorical_accuracy: 0.8455 Epoch 00006: val_categorical_accuracy improved from 0.47000 to 0.48000, saving model to Conv3D_5_2_SGD_2021-05-2707_25_58.766835/model-00006-0.43255-0.84465-3.67959-0.48000.h5 34/34 [==============================] - 38s 1s/step - loss: 0.4325 - categorical_accuracy: 0.8446 - val_loss: 3.6796 - val_categorical_accuracy: 0.4800 Epoch 7/30 33/34 [============================>.] 
- ETA: 0s - loss: 0.2951 - categorical_accuracy: 0.9076 Epoch 00007: val_categorical_accuracy improved from 0.48000 to 0.53000, saving model to Conv3D_5_2_SGD_2021-05-2707_25_58.766835/model-00007-0.29434-0.90799-1.57486-0.53000.h5 34/34 [==============================] - 39s 1s/step - loss: 0.2943 - categorical_accuracy: 0.9080 - val_loss: 1.5749 - val_categorical_accuracy: 0.5300 Epoch 8/30 33/34 [============================>.] - ETA: 0s - loss: 0.2360 - categorical_accuracy: 0.9136 Epoch 00008: val_categorical_accuracy did not improve from 0.53000 34/34 [==============================] - 37s 1s/step - loss: 0.2390 - categorical_accuracy: 0.9110 - val_loss: 3.6673 - val_categorical_accuracy: 0.4300 Epoch 9/30 33/34 [============================>.] - ETA: 0s - loss: 0.2929 - categorical_accuracy: 0.9182 Epoch 00009: val_categorical_accuracy improved from 0.53000 to 0.71000, saving model to Conv3D_5_2_SGD_2021-05-2707_25_58.766835/model-00009-0.29185-0.91855-0.93029-0.71000.h5 34/34 [==============================] - 37s 1s/step - loss: 0.2918 - categorical_accuracy: 0.9186 - val_loss: 0.9303 - val_categorical_accuracy: 0.7100 Epoch 10/30 33/34 [============================>.] 
- ETA: 0s - loss: 0.1364 - categorical_accuracy: 0.9591 Epoch 00010: val_categorical_accuracy did not improve from 0.71000 34/34 [==============================] - 37s 1s/step - loss: 0.1359 - categorical_accuracy: 0.9593 - val_loss: 0.8487 - val_categorical_accuracy: 0.6900 Epoch 11/30 34/34 [==============================] - ETA: 0s - loss: 0.0651 - categorical_accuracy: 0.9910 Epoch 00011: val_categorical_accuracy did not improve from 0.71000 34/34 [==============================] - 37s 1s/step - loss: 0.0651 - categorical_accuracy: 0.9910 - val_loss: 1.8672 - val_categorical_accuracy: 0.5300 Epoch 12/30 34/34 [==============================] - ETA: 0s - loss: 0.1204 - categorical_accuracy: 0.9563 Epoch 00012: val_categorical_accuracy improved from 0.71000 to 0.75000, saving model to Conv3D_5_2_SGD_2021-05-2707_25_58.766835/model-00012-0.12044-0.95626-0.85058-0.75000.h5 34/34 [==============================] - 37s 1s/step - loss: 0.1204 - categorical_accuracy: 0.9563 - val_loss: 0.8506 - val_categorical_accuracy: 0.7500 Epoch 13/30 33/34 [============================>.] - ETA: 0s - loss: 0.0435 - categorical_accuracy: 0.9970 Epoch 00013: val_categorical_accuracy did not improve from 0.75000 34/34 [==============================] - 37s 1s/step - loss: 0.0434 - categorical_accuracy: 0.9970 - val_loss: 0.8022 - val_categorical_accuracy: 0.7100 Epoch 14/30 33/34 [============================>.] - ETA: 0s - loss: 0.0398 - categorical_accuracy: 0.9924 Epoch 00014: val_categorical_accuracy did not improve from 0.75000 34/34 [==============================] - 37s 1s/step - loss: 0.0398 - categorical_accuracy: 0.9925 - val_loss: 0.9341 - val_categorical_accuracy: 0.7500 Epoch 15/30 33/34 [============================>.] 
- ETA: 0s - loss: 0.0322 - categorical_accuracy: 0.9970 Epoch 00015: val_categorical_accuracy did not improve from 0.75000 34/34 [==============================] - 37s 1s/step - loss: 0.0329 - categorical_accuracy: 0.9970 - val_loss: 1.2984 - val_categorical_accuracy: 0.6500 Epoch 16/30 33/34 [============================>.] - ETA: 0s - loss: 0.0275 - categorical_accuracy: 0.9985 Epoch 00016: val_categorical_accuracy did not improve from 0.75000 34/34 [==============================] - 37s 1s/step - loss: 0.0276 - categorical_accuracy: 0.9985 - val_loss: 0.8827 - val_categorical_accuracy: 0.6900 Epoch 17/30 33/34 [============================>.] - ETA: 0s - loss: 0.0153 - categorical_accuracy: 1.0000 Epoch 00017: val_categorical_accuracy did not improve from 0.75000 Epoch 00017: ReduceLROnPlateau reducing learning rate to 0.0019999999552965165. 34/34 [==============================] - 38s 1s/step - loss: 0.0154 - categorical_accuracy: 1.0000 - val_loss: 0.9276 - val_categorical_accuracy: 0.7100 Epoch 18/30 33/34 [============================>.] - ETA: 0s - loss: 0.0130 - categorical_accuracy: 1.0000 Epoch 00018: val_categorical_accuracy improved from 0.75000 to 0.78000, saving model to Conv3D_5_2_SGD_2021-05-2707_25_58.766835/model-00018-0.01301-1.00000-0.76219-0.78000.h5 34/34 [==============================] - 38s 1s/step - loss: 0.0130 - categorical_accuracy: 1.0000 - val_loss: 0.7622 - val_categorical_accuracy: 0.7800 Epoch 19/30 33/34 [============================>.] - ETA: 0s - loss: 0.0112 - categorical_accuracy: 1.0000 Epoch 00019: val_categorical_accuracy improved from 0.78000 to 0.79000, saving model to Conv3D_5_2_SGD_2021-05-2707_25_58.766835/model-00019-0.01122-1.00000-0.86637-0.79000.h5 34/34 [==============================] - 38s 1s/step - loss: 0.0112 - categorical_accuracy: 1.0000 - val_loss: 0.8664 - val_categorical_accuracy: 0.7900 Epoch 20/30 33/34 [============================>.] 
- ETA: 0s - loss: 0.0110 - categorical_accuracy: 1.0000 Epoch 00020: val_categorical_accuracy did not improve from 0.79000 34/34 [==============================] - 37s 1s/step - loss: 0.0113 - categorical_accuracy: 1.0000 - val_loss: 0.7858 - val_categorical_accuracy: 0.7500 Epoch 21/30 33/34 [============================>.] - ETA: 0s - loss: 0.0105 - categorical_accuracy: 1.0000 Epoch 00021: val_categorical_accuracy did not improve from 0.79000 34/34 [==============================] - 37s 1s/step - loss: 0.0110 - categorical_accuracy: 1.0000 - val_loss: 0.7865 - val_categorical_accuracy: 0.7400 Epoch 22/30 33/34 [============================>.] - ETA: 0s - loss: 0.0122 - categorical_accuracy: 1.0000 Epoch 00022: val_categorical_accuracy did not improve from 0.79000 Epoch 00022: ReduceLROnPlateau reducing learning rate to 0.0003999999724328518. 34/34 [==============================] - 37s 1s/step - loss: 0.0121 - categorical_accuracy: 1.0000 - val_loss: 0.7696 - val_categorical_accuracy: 0.7800 Epoch 23/30 33/34 [============================>.] - ETA: 0s - loss: 0.0097 - categorical_accuracy: 1.0000 Epoch 00023: val_categorical_accuracy did not improve from 0.79000 34/34 [==============================] - 37s 1s/step - loss: 0.0096 - categorical_accuracy: 1.0000 - val_loss: 0.6703 - val_categorical_accuracy: 0.7800 Epoch 24/30 33/34 [============================>.] - ETA: 0s - loss: 0.0096 - categorical_accuracy: 1.0000 Epoch 00024: val_categorical_accuracy improved from 0.79000 to 0.81000, saving model to Conv3D_5_2_SGD_2021-05-2707_25_58.766835/model-00024-0.00958-1.00000-0.77504-0.81000.h5 34/34 [==============================] - 37s 1s/step - loss: 0.0096 - categorical_accuracy: 1.0000 - val_loss: 0.7750 - val_categorical_accuracy: 0.8100 Epoch 25/30 33/34 [============================>.] 
- ETA: 0s - loss: 0.0093 - categorical_accuracy: 1.0000 Epoch 00025: val_categorical_accuracy did not improve from 0.81000 34/34 [==============================] - 37s 1s/step - loss: 0.0104 - categorical_accuracy: 1.0000 - val_loss: 0.7878 - val_categorical_accuracy: 0.7800 Epoch 26/30 33/34 [============================>.] - ETA: 0s - loss: 0.0118 - categorical_accuracy: 1.0000 Epoch 00026: val_categorical_accuracy did not improve from 0.81000 34/34 [==============================] - 37s 1s/step - loss: 0.0117 - categorical_accuracy: 1.0000 - val_loss: 0.8001 - val_categorical_accuracy: 0.7800 Epoch 27/30 33/34 [============================>.] - ETA: 0s - loss: 0.0103 - categorical_accuracy: 1.0000 Epoch 00027: val_categorical_accuracy did not improve from 0.81000 Epoch 00027: ReduceLROnPlateau reducing learning rate to 7.999999215826393e-05. 34/34 [==============================] - 36s 1s/step - loss: 0.0104 - categorical_accuracy: 1.0000 - val_loss: 0.7921 - val_categorical_accuracy: 0.8000 Epoch 28/30 33/34 [============================>.] - ETA: 0s - loss: 0.0105 - categorical_accuracy: 1.0000 Epoch 00028: val_categorical_accuracy did not improve from 0.81000 34/34 [==============================] - 37s 1s/step - loss: 0.0105 - categorical_accuracy: 1.0000 - val_loss: 0.9823 - val_categorical_accuracy: 0.7500 Epoch 29/30 33/34 [============================>.] - ETA: 0s - loss: 0.0099 - categorical_accuracy: 1.0000 Epoch 00029: val_categorical_accuracy did not improve from 0.81000 34/34 [==============================] - 37s 1s/step - loss: 0.0099 - categorical_accuracy: 1.0000 - val_loss: 0.8721 - val_categorical_accuracy: 0.7900 Epoch 30/30 33/34 [============================>.] - ETA: 0s - loss: 0.0125 - categorical_accuracy: 1.0000 Epoch 00030: val_categorical_accuracy did not improve from 0.81000 34/34 [==============================] - 37s 1s/step - loss: 0.0125 - categorical_accuracy: 1.0000 - val_loss: 0.7679 - val_categorical_accuracy: 0.8000
# CNN LSTM Model
def model_LSTM_1(num_frames, frame_height, frame_width):
    """Build a TimeDistributed-CNN + stacked-LSTM classifier for 5 gestures.

    A per-frame 2D conv stack (16 -> 32 -> 64 -> 128 filters, all 2x2,
    'same' padding, each followed by ReLU, BatchNorm and 2x2 max-pooling)
    is applied identically to every frame via TimeDistributed; the
    flattened per-frame features then feed two LSTM layers, ending in a
    5-way softmax.

    Args:
        num_frames:   frames sampled per video clip.
        frame_height: input frame height in pixels.
        frame_width:  input frame width in pixels.

    Returns:
        An uncompiled keras Sequential model with output shape (None, 5).
    """
    td = TimeDistributed  # shorthand: wrap a 2D layer so it runs per frame
    layers = [
        # Block 1: 16 filters; input is (frames, H, W, 3) RGB.
        td(Conv2D(16, (2, 2), padding='same'),
           input_shape=(num_frames, frame_height, frame_width, 3)),
        Activation('relu'),
        td(BatchNormalization()),
        td(MaxPooling2D((2, 2))),
        # Block 2: 32 filters.
        td(Conv2D(32, (2, 2), padding='same')),
        Activation('relu'),
        td(BatchNormalization()),
        td(MaxPooling2D((2, 2))),
        # Block 3: 64 filters.
        td(Conv2D(64, (2, 2), padding='same')),
        Activation('relu'),
        td(BatchNormalization()),
        td(MaxPooling2D((2, 2))),
        # NOTE(review): a second BatchNorm + MaxPool with no conv in
        # between — looks like a copy-paste duplicate, but it changes the
        # spatial resolution, so it is kept to preserve the trained
        # architecture exactly. Confirm whether this was intentional.
        td(BatchNormalization()),
        td(MaxPooling2D((2, 2))),
        Dropout(0.2),
        # Block 4: 128 filters.
        td(Conv2D(128, (2, 2), padding='same')),
        Activation('relu'),
        td(BatchNormalization()),
        td(MaxPooling2D((2, 2))),
        Dropout(0.35),
        # NOTE(review): same duplicated BatchNorm + MaxPool pattern here.
        td(BatchNormalization()),
        td(MaxPooling2D((2, 2))),
        Dropout(0.35),
        # Collapse each frame's feature map to a vector, then model the
        # temporal sequence with two LSTMs.
        td(Flatten()),
        LSTM(units=128, return_sequences=True, activation='tanh',
             recurrent_dropout=0.25),
        Dropout(0.25),
        LSTM(units=50, activation='tanh', recurrent_dropout=0.25),
        Dropout(0.25),
        # Final softmax layer for the 5 classes.
        Dense(5),
        Activation('softmax'),
    ]
    return Sequential(layers)
# Model 12 - CNN LSTM Model
# Experiment hyper-parameters: 20 frames per clip at 100x100, batch of 40,
# up to 100 epochs, no augmentation and no extra normalization.
num_frames, frame_height, frame_width = 20, 100, 100
size_batch = 40
num_epochs = 100
augment, normalize = False, False

# Build and compile the CNN+LSTM model with Adam at lr=5e-4.
model = model_LSTM_1(num_frames, frame_height, frame_width)
optimiser = optimizers.Adam(learning_rate=0.0005)
model.compile(
    optimizer=optimiser,
    loss='categorical_crossentropy',
    metrics=['categorical_accuracy'],
)
print(model.summary())
WARNING:tensorflow:Layer lstm will not use cuDNN kernel since it doesn't meet the cuDNN kernel criteria. It will use generic GPU kernel as fallback when running on GPU WARNING:tensorflow:Layer lstm_1 will not use cuDNN kernel since it doesn't meet the cuDNN kernel criteria. It will use generic GPU kernel as fallback when running on GPU Model: "sequential_13" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= time_distributed (TimeDistri (None, 20, 100, 100, 16) 208 _________________________________________________________________ activation_68 (Activation) (None, 20, 100, 100, 16) 0 _________________________________________________________________ time_distributed_1 (TimeDist (None, 20, 100, 100, 16) 64 _________________________________________________________________ time_distributed_2 (TimeDist (None, 20, 50, 50, 16) 0 _________________________________________________________________ time_distributed_3 (TimeDist (None, 20, 50, 50, 32) 2080 _________________________________________________________________ activation_69 (Activation) (None, 20, 50, 50, 32) 0 _________________________________________________________________ time_distributed_4 (TimeDist (None, 20, 50, 50, 32) 128 _________________________________________________________________ time_distributed_5 (TimeDist (None, 20, 25, 25, 32) 0 _________________________________________________________________ time_distributed_6 (TimeDist (None, 20, 25, 25, 64) 8256 _________________________________________________________________ activation_70 (Activation) (None, 20, 25, 25, 64) 0 _________________________________________________________________ time_distributed_7 (TimeDist (None, 20, 25, 25, 64) 256 _________________________________________________________________ time_distributed_8 (TimeDist (None, 20, 12, 12, 64) 0 _________________________________________________________________ time_distributed_9 
(TimeDist (None, 20, 12, 12, 64) 256 _________________________________________________________________ time_distributed_10 (TimeDis (None, 20, 6, 6, 64) 0 _________________________________________________________________ dropout_25 (Dropout) (None, 20, 6, 6, 64) 0 _________________________________________________________________ time_distributed_11 (TimeDis (None, 20, 6, 6, 128) 32896 _________________________________________________________________ activation_71 (Activation) (None, 20, 6, 6, 128) 0 _________________________________________________________________ time_distributed_12 (TimeDis (None, 20, 6, 6, 128) 512 _________________________________________________________________ time_distributed_13 (TimeDis (None, 20, 3, 3, 128) 0 _________________________________________________________________ dropout_26 (Dropout) (None, 20, 3, 3, 128) 0 _________________________________________________________________ time_distributed_14 (TimeDis (None, 20, 3, 3, 128) 512 _________________________________________________________________ time_distributed_15 (TimeDis (None, 20, 1, 1, 128) 0 _________________________________________________________________ dropout_27 (Dropout) (None, 20, 1, 1, 128) 0 _________________________________________________________________ time_distributed_16 (TimeDis (None, 20, 128) 0 _________________________________________________________________ lstm (LSTM) (None, 20, 128) 131584 _________________________________________________________________ dropout_28 (Dropout) (None, 20, 128) 0 _________________________________________________________________ lstm_1 (LSTM) (None, 50) 35800 _________________________________________________________________ dropout_29 (Dropout) (None, 50) 0 _________________________________________________________________ dense_34 (Dense) (None, 5) 255 _________________________________________________________________ activation_72 (Activation) (None, 5) 0 ================================================================= Total 
params: 212,807 Trainable params: 211,943 Non-trainable params: 864 _________________________________________________________________ None
# Train Model 12 via the notebook's trainer() helper (defined earlier,
# outside this view); it returns the Keras History object used by modelplot.
model_history = trainer(model, 'model_LSTM_1', num_epochs, size_batch, num_frames, frame_height, frame_width, augment, normalize)
WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen. Source path = /datasets/Project_data/train ; batch size = 40 Image Indexes: [ 0 2 3 5 6 8 9 11 12 14 15 17 18 20 21 23 24 26 27 29] Epoch 1/100 17/17 [==============================] - ETA: 0s - loss: 1.4727 - categorical_accuracy: 0.3152Source path = /datasets/Project_data/val ; batch size = 40 Image Indexes: [ 0 2 3 5 6 8 9 11 12 14 15 17 18 20 21 23 24 26 27 29] Epoch 00001: val_categorical_accuracy improved from -inf to 0.24000, saving model to model_LSTM_1_2021-05-2707_44_46.717699/model-00001-1.47275-0.31523-1.94819-0.24000.h5 17/17 [==============================] - 48s 3s/step - loss: 1.4727 - categorical_accuracy: 0.3152 - val_loss: 1.9482 - val_categorical_accuracy: 0.2400 Epoch 2/100 17/17 [==============================] - ETA: 0s - loss: 1.2486 - categorical_accuracy: 0.4540 Epoch 00002: val_categorical_accuracy did not improve from 0.24000 17/17 [==============================] - 47s 3s/step - loss: 1.2486 - categorical_accuracy: 0.4540 - val_loss: 2.1559 - val_categorical_accuracy: 0.2400 Epoch 3/100 17/17 [==============================] - ETA: 0s - loss: 1.1218 - categorical_accuracy: 0.5339 Epoch 00003: val_categorical_accuracy did not improve from 0.24000 17/17 [==============================] - 47s 3s/step - loss: 1.1218 - categorical_accuracy: 0.5339 - val_loss: 2.2906 - val_categorical_accuracy: 0.1900 Epoch 4/100 17/17 [==============================] - ETA: 0s - loss: 0.9805 - categorical_accuracy: 0.6033 Epoch 00004: val_categorical_accuracy did not improve from 0.24000 17/17 [==============================] - 49s 3s/step - loss: 0.9805 - categorical_accuracy: 0.6033 - val_loss: 2.1463 - val_categorical_accuracy: 0.2300 Epoch 5/100 17/17 [==============================] - ETA: 0s - loss: 0.8610 - categorical_accuracy: 0.6652 Epoch 00005: val_categorical_accuracy improved from 0.24000 to 0.31000, saving model to 
model_LSTM_1_2021-05-2707_44_46.717699/model-00005-0.86102-0.66516-1.78213-0.31000.h5 17/17 [==============================] - 50s 3s/step - loss: 0.8610 - categorical_accuracy: 0.6652 - val_loss: 1.7821 - val_categorical_accuracy: 0.3100 Epoch 6/100 17/17 [==============================] - ETA: 0s - loss: 0.7250 - categorical_accuracy: 0.7195 Epoch 00006: val_categorical_accuracy improved from 0.31000 to 0.42000, saving model to model_LSTM_1_2021-05-2707_44_46.717699/model-00006-0.72499-0.71946-1.42002-0.42000.h5 17/17 [==============================] - 47s 3s/step - loss: 0.7250 - categorical_accuracy: 0.7195 - val_loss: 1.4200 - val_categorical_accuracy: 0.4200 Epoch 7/100 17/17 [==============================] - ETA: 0s - loss: 0.6619 - categorical_accuracy: 0.7602 Epoch 00007: val_categorical_accuracy improved from 0.42000 to 0.48000, saving model to model_LSTM_1_2021-05-2707_44_46.717699/model-00007-0.66193-0.76018-1.33192-0.48000.h5 17/17 [==============================] - 49s 3s/step - loss: 0.6619 - categorical_accuracy: 0.7602 - val_loss: 1.3319 - val_categorical_accuracy: 0.4800 Epoch 8/100 17/17 [==============================] - ETA: 0s - loss: 0.5284 - categorical_accuracy: 0.8024 Epoch 00008: val_categorical_accuracy improved from 0.48000 to 0.56000, saving model to model_LSTM_1_2021-05-2707_44_46.717699/model-00008-0.52838-0.80241-1.12043-0.56000.h5 17/17 [==============================] - 50s 3s/step - loss: 0.5284 - categorical_accuracy: 0.8024 - val_loss: 1.1204 - val_categorical_accuracy: 0.5600 Epoch 9/100 17/17 [==============================] - ETA: 0s - loss: 0.4876 - categorical_accuracy: 0.8235 Epoch 00009: val_categorical_accuracy improved from 0.56000 to 0.57000, saving model to model_LSTM_1_2021-05-2707_44_46.717699/model-00009-0.48763-0.82353-1.00811-0.57000.h5 17/17 [==============================] - 48s 3s/step - loss: 0.4876 - categorical_accuracy: 0.8235 - val_loss: 1.0081 - val_categorical_accuracy: 0.5700 Epoch 10/100 17/17 
[==============================] - ETA: 0s - loss: 0.4517 - categorical_accuracy: 0.8522 Epoch 00010: val_categorical_accuracy did not improve from 0.57000 17/17 [==============================] - 50s 3s/step - loss: 0.4517 - categorical_accuracy: 0.8522 - val_loss: 1.2403 - val_categorical_accuracy: 0.4700 Epoch 11/100 17/17 [==============================] - ETA: 0s - loss: 0.4176 - categorical_accuracy: 0.8552 Epoch 00011: val_categorical_accuracy improved from 0.57000 to 0.58000, saving model to model_LSTM_1_2021-05-2707_44_46.717699/model-00011-0.41758-0.85520-1.02018-0.58000.h5 17/17 [==============================] - 50s 3s/step - loss: 0.4176 - categorical_accuracy: 0.8552 - val_loss: 1.0202 - val_categorical_accuracy: 0.5800 Epoch 12/100 17/17 [==============================] - ETA: 0s - loss: 0.3179 - categorical_accuracy: 0.8959 Epoch 00012: val_categorical_accuracy did not improve from 0.58000 17/17 [==============================] - 49s 3s/step - loss: 0.3179 - categorical_accuracy: 0.8959 - val_loss: 1.2889 - val_categorical_accuracy: 0.5400 Epoch 13/100 17/17 [==============================] - ETA: 0s - loss: 0.2556 - categorical_accuracy: 0.9170 Epoch 00013: val_categorical_accuracy improved from 0.58000 to 0.62000, saving model to model_LSTM_1_2021-05-2707_44_46.717699/model-00013-0.25557-0.91704-1.06903-0.62000.h5 Epoch 00013: ReduceLROnPlateau reducing learning rate to 0.00010000000474974513. 
17/17 [==============================] - 51s 3s/step - loss: 0.2556 - categorical_accuracy: 0.9170 - val_loss: 1.0690 - val_categorical_accuracy: 0.6200 Epoch 14/100 17/17 [==============================] - ETA: 0s - loss: 0.2163 - categorical_accuracy: 0.9306 Epoch 00014: val_categorical_accuracy improved from 0.62000 to 0.70000, saving model to model_LSTM_1_2021-05-2707_44_46.717699/model-00014-0.21631-0.93062-0.84166-0.70000.h5 17/17 [==============================] - 50s 3s/step - loss: 0.2163 - categorical_accuracy: 0.9306 - val_loss: 0.8417 - val_categorical_accuracy: 0.7000 Epoch 15/100 17/17 [==============================] - ETA: 0s - loss: 0.1715 - categorical_accuracy: 0.9472 Epoch 00015: val_categorical_accuracy improved from 0.70000 to 0.71000, saving model to model_LSTM_1_2021-05-2707_44_46.717699/model-00015-0.17151-0.94721-0.93553-0.71000.h5 17/17 [==============================] - 49s 3s/step - loss: 0.1715 - categorical_accuracy: 0.9472 - val_loss: 0.9355 - val_categorical_accuracy: 0.7100 Epoch 16/100 17/17 [==============================] - ETA: 0s - loss: 0.1488 - categorical_accuracy: 0.9623 Epoch 00016: val_categorical_accuracy did not improve from 0.71000 17/17 [==============================] - 51s 3s/step - loss: 0.1488 - categorical_accuracy: 0.9623 - val_loss: 1.0221 - val_categorical_accuracy: 0.6700 Epoch 17/100 17/17 [==============================] - ETA: 0s - loss: 0.1414 - categorical_accuracy: 0.9593 Epoch 00017: val_categorical_accuracy improved from 0.71000 to 0.73000, saving model to model_LSTM_1_2021-05-2707_44_46.717699/model-00017-0.14138-0.95928-0.94848-0.73000.h5 17/17 [==============================] - 49s 3s/step - loss: 0.1414 - categorical_accuracy: 0.9593 - val_loss: 0.9485 - val_categorical_accuracy: 0.7300 Epoch 18/100 17/17 [==============================] - ETA: 0s - loss: 0.1472 - categorical_accuracy: 0.9487 Epoch 00018: val_categorical_accuracy did not improve from 0.73000 Epoch 00018: ReduceLROnPlateau 
reducing learning rate to 2.0000000949949027e-05. 17/17 [==============================] - 49s 3s/step - loss: 0.1472 - categorical_accuracy: 0.9487 - val_loss: 1.1600 - val_categorical_accuracy: 0.6200 Epoch 19/100 17/17 [==============================] - ETA: 0s - loss: 0.1255 - categorical_accuracy: 0.9668 Epoch 00019: val_categorical_accuracy did not improve from 0.73000 17/17 [==============================] - 50s 3s/step - loss: 0.1255 - categorical_accuracy: 0.9668 - val_loss: 1.0784 - val_categorical_accuracy: 0.6700 Epoch 20/100 17/17 [==============================] - ETA: 0s - loss: 0.1187 - categorical_accuracy: 0.9698 Epoch 00020: val_categorical_accuracy did not improve from 0.73000 17/17 [==============================] - 50s 3s/step - loss: 0.1187 - categorical_accuracy: 0.9698 - val_loss: 1.2679 - val_categorical_accuracy: 0.6000 Epoch 21/100 17/17 [==============================] - ETA: 0s - loss: 0.1092 - categorical_accuracy: 0.9744 Epoch 00021: val_categorical_accuracy did not improve from 0.73000 17/17 [==============================] - 49s 3s/step - loss: 0.1092 - categorical_accuracy: 0.9744 - val_loss: 0.8543 - val_categorical_accuracy: 0.7300 Epoch 22/100 17/17 [==============================] - ETA: 0s - loss: 0.1192 - categorical_accuracy: 0.9713 Epoch 00022: val_categorical_accuracy did not improve from 0.73000 Epoch 00022: ReduceLROnPlateau reducing learning rate to 4.000000262749381e-06. 
17/17 [==============================] - 50s 3s/step - loss: 0.1192 - categorical_accuracy: 0.9713 - val_loss: 0.9960 - val_categorical_accuracy: 0.6900 Epoch 23/100 17/17 [==============================] - ETA: 0s - loss: 0.1159 - categorical_accuracy: 0.9698 Epoch 00023: val_categorical_accuracy improved from 0.73000 to 0.78000, saving model to model_LSTM_1_2021-05-2707_44_46.717699/model-00023-0.11589-0.96983-0.68263-0.78000.h5 17/17 [==============================] - 49s 3s/step - loss: 0.1159 - categorical_accuracy: 0.9698 - val_loss: 0.6826 - val_categorical_accuracy: 0.7800 Epoch 24/100 17/17 [==============================] - ETA: 0s - loss: 0.1090 - categorical_accuracy: 0.9744 Epoch 00024: val_categorical_accuracy did not improve from 0.78000 17/17 [==============================] - 49s 3s/step - loss: 0.1090 - categorical_accuracy: 0.9744 - val_loss: 1.0274 - val_categorical_accuracy: 0.6800 Epoch 25/100 17/17 [==============================] - ETA: 0s - loss: 0.1171 - categorical_accuracy: 0.9744 Epoch 00025: val_categorical_accuracy did not improve from 0.78000 17/17 [==============================] - 49s 3s/step - loss: 0.1171 - categorical_accuracy: 0.9744 - val_loss: 0.9882 - val_categorical_accuracy: 0.7000 Epoch 26/100 17/17 [==============================] - ETA: 0s - loss: 0.1056 - categorical_accuracy: 0.9729 Epoch 00026: val_categorical_accuracy did not improve from 0.78000 17/17 [==============================] - 50s 3s/step - loss: 0.1056 - categorical_accuracy: 0.9729 - val_loss: 1.0284 - val_categorical_accuracy: 0.6800 Epoch 27/100 17/17 [==============================] - ETA: 0s - loss: 0.1036 - categorical_accuracy: 0.9774 Epoch 00027: val_categorical_accuracy did not improve from 0.78000 Epoch 00027: ReduceLROnPlateau reducing learning rate to 8.000000889296644e-07. 
17/17 [==============================] - 49s 3s/step - loss: 0.1036 - categorical_accuracy: 0.9774 - val_loss: 1.0294 - val_categorical_accuracy: 0.6900 Epoch 28/100 17/17 [==============================] - ETA: 0s - loss: 0.1063 - categorical_accuracy: 0.9744 Epoch 00028: val_categorical_accuracy did not improve from 0.78000 17/17 [==============================] - 51s 3s/step - loss: 0.1063 - categorical_accuracy: 0.9744 - val_loss: 0.9584 - val_categorical_accuracy: 0.7200 Epoch 29/100 17/17 [==============================] - ETA: 0s - loss: 0.1196 - categorical_accuracy: 0.9713 Epoch 00029: val_categorical_accuracy did not improve from 0.78000 17/17 [==============================] - 51s 3s/step - loss: 0.1196 - categorical_accuracy: 0.9713 - val_loss: 1.0451 - val_categorical_accuracy: 0.6900 Epoch 30/100 17/17 [==============================] - ETA: 0s - loss: 0.1190 - categorical_accuracy: 0.9713 Epoch 00030: val_categorical_accuracy did not improve from 0.78000 17/17 [==============================] - 49s 3s/step - loss: 0.1190 - categorical_accuracy: 0.9713 - val_loss: 0.8905 - val_categorical_accuracy: 0.7400 Epoch 31/100 17/17 [==============================] - ETA: 0s - loss: 0.1015 - categorical_accuracy: 0.9789 Epoch 00031: val_categorical_accuracy did not improve from 0.78000 Epoch 00031: ReduceLROnPlateau reducing learning rate to 1.6000001323845936e-07. 
17/17 [==============================] - 51s 3s/step - loss: 0.1015 - categorical_accuracy: 0.9789 - val_loss: 0.9313 - val_categorical_accuracy: 0.7300 Epoch 32/100 17/17 [==============================] - ETA: 0s - loss: 0.1265 - categorical_accuracy: 0.9623 Epoch 00032: val_categorical_accuracy did not improve from 0.78000 17/17 [==============================] - 51s 3s/step - loss: 0.1265 - categorical_accuracy: 0.9623 - val_loss: 0.8525 - val_categorical_accuracy: 0.7600 Epoch 33/100 17/17 [==============================] - ETA: 0s - loss: 0.1050 - categorical_accuracy: 0.9774 Epoch 00033: val_categorical_accuracy did not improve from 0.78000 17/17 [==============================] - 50s 3s/step - loss: 0.1050 - categorical_accuracy: 0.9774 - val_loss: 0.8898 - val_categorical_accuracy: 0.7200 Epoch 00033: early stopping
# Plot Model 12's training/validation curves from the History object
# (modelplot is defined earlier in the notebook, outside this view).
modelplot(model_history)
# CNN GRU Model
def model_GRU_1(num_frames, frame_height, frame_width):
    """Build a TimeDistributed-CNN + stacked-GRU gesture classifier.

    A per-frame CNN feature extractor (Conv2D blocks wrapped in
    TimeDistributed) feeds two stacked GRU layers; a 5-way softmax head
    predicts the gesture class.

    Args:
        num_frames: number of frames sampled from each video clip.
        frame_height: frame height in pixels.
        frame_width: frame width in pixels.

    Returns:
        Uncompiled keras Sequential model with input shape
        (num_frames, frame_height, frame_width, 3) and output shape (5,).
    """
    clip_shape = (num_frames, frame_height, frame_width, 3)
    # NOTE(review): the repeated BatchNormalization+MaxPooling2D pairs after
    # the 64- and 256-filter conv blocks (no conv in between) look like
    # copy-paste duplicates, but they match the trained model's printed
    # summary, so they are kept verbatim — confirm intent.
    layer_stack = [
        TimeDistributed(Conv2D(16, (2, 2), padding='same'), input_shape=clip_shape),
        Activation('relu'),
        TimeDistributed(BatchNormalization()),
        TimeDistributed(MaxPooling2D((2, 2))),

        TimeDistributed(Conv2D(32, (2, 2), padding='same')),
        Activation('relu'),
        TimeDistributed(BatchNormalization()),
        TimeDistributed(MaxPooling2D((2, 2))),

        TimeDistributed(Conv2D(64, (2, 2), padding='same')),
        Activation('relu'),
        TimeDistributed(BatchNormalization()),
        TimeDistributed(MaxPooling2D((2, 2))),
        TimeDistributed(BatchNormalization()),
        TimeDistributed(MaxPooling2D((2, 2))),
        Dropout(0.2),

        TimeDistributed(Conv2D(256, (2, 2), padding='same')),
        Activation('relu'),
        TimeDistributed(BatchNormalization()),
        TimeDistributed(MaxPooling2D((2, 2))),
        Dropout(0.25),
        TimeDistributed(BatchNormalization()),
        TimeDistributed(MaxPooling2D((2, 2))),
        Dropout(0.25),

        # Flatten each frame's feature map, then model temporal dynamics.
        TimeDistributed(Flatten()),
        GRU(units=128, return_sequences=True, activation='tanh', recurrent_dropout=0.25),
        Dropout(0.35),
        GRU(units=128, activation='tanh', recurrent_dropout=0.25),
        Dropout(0.5),

        # Final softmax layer for the 5 classes.
        Dense(5),
        Activation('softmax'),
    ]

    model = Sequential()
    for layer in layer_stack:
        model.add(layer)
    return model
# Model 13 - CNN GRU Model
# Experiment hyper-parameters: 16 frames per clip at 120x120, batch of 50,
# up to 100 epochs, no augmentation and no extra normalization.
num_frames, frame_height, frame_width = 16, 120, 120
size_batch = 50
num_epochs = 100
augment, normalize = False, False

# Build and compile the CNN+GRU model with Adam at lr=8e-4.
model = model_GRU_1(num_frames, frame_height, frame_width)
optimiser = optimizers.Adam(learning_rate=0.0008)
model.compile(
    optimizer=optimiser,
    loss='categorical_crossentropy',
    metrics=['categorical_accuracy'],
)
print(model.summary())
WARNING:tensorflow:Layer gru will not use cuDNN kernel since it doesn't meet the cuDNN kernel criteria. It will use generic GPU kernel as fallback when running on GPU WARNING:tensorflow:Layer gru_1 will not use cuDNN kernel since it doesn't meet the cuDNN kernel criteria. It will use generic GPU kernel as fallback when running on GPU Model: "sequential_14" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= time_distributed_17 (TimeDis (None, 16, 120, 120, 16) 208 _________________________________________________________________ activation_73 (Activation) (None, 16, 120, 120, 16) 0 _________________________________________________________________ time_distributed_18 (TimeDis (None, 16, 120, 120, 16) 64 _________________________________________________________________ time_distributed_19 (TimeDis (None, 16, 60, 60, 16) 0 _________________________________________________________________ time_distributed_20 (TimeDis (None, 16, 60, 60, 32) 2080 _________________________________________________________________ activation_74 (Activation) (None, 16, 60, 60, 32) 0 _________________________________________________________________ time_distributed_21 (TimeDis (None, 16, 60, 60, 32) 128 _________________________________________________________________ time_distributed_22 (TimeDis (None, 16, 30, 30, 32) 0 _________________________________________________________________ time_distributed_23 (TimeDis (None, 16, 30, 30, 64) 8256 _________________________________________________________________ activation_75 (Activation) (None, 16, 30, 30, 64) 0 _________________________________________________________________ time_distributed_24 (TimeDis (None, 16, 30, 30, 64) 256 _________________________________________________________________ time_distributed_25 (TimeDis (None, 16, 15, 15, 64) 0 _________________________________________________________________ time_distributed_26 
(TimeDis (None, 16, 15, 15, 64) 256 _________________________________________________________________ time_distributed_27 (TimeDis (None, 16, 7, 7, 64) 0 _________________________________________________________________ dropout_30 (Dropout) (None, 16, 7, 7, 64) 0 _________________________________________________________________ time_distributed_28 (TimeDis (None, 16, 7, 7, 256) 65792 _________________________________________________________________ activation_76 (Activation) (None, 16, 7, 7, 256) 0 _________________________________________________________________ time_distributed_29 (TimeDis (None, 16, 7, 7, 256) 1024 _________________________________________________________________ time_distributed_30 (TimeDis (None, 16, 3, 3, 256) 0 _________________________________________________________________ dropout_31 (Dropout) (None, 16, 3, 3, 256) 0 _________________________________________________________________ time_distributed_31 (TimeDis (None, 16, 3, 3, 256) 1024 _________________________________________________________________ time_distributed_32 (TimeDis (None, 16, 1, 1, 256) 0 _________________________________________________________________ dropout_32 (Dropout) (None, 16, 1, 1, 256) 0 _________________________________________________________________ time_distributed_33 (TimeDis (None, 16, 256) 0 _________________________________________________________________ gru (GRU) (None, 16, 128) 148224 _________________________________________________________________ dropout_33 (Dropout) (None, 16, 128) 0 _________________________________________________________________ gru_1 (GRU) (None, 128) 99072 _________________________________________________________________ dropout_34 (Dropout) (None, 128) 0 _________________________________________________________________ dense_35 (Dense) (None, 5) 645 _________________________________________________________________ activation_77 (Activation) (None, 5) 0 ================================================================= Total 
params: 327,029 Trainable params: 325,653 Non-trainable params: 1,376 _________________________________________________________________ None
# Run the model and check accuracy
# Train Model 13 via the notebook's trainer() helper (defined earlier,
# outside this view); it returns the Keras History object.
model_history = trainer(model, 'model_GRU_1', num_epochs, size_batch, num_frames, frame_height, frame_width, augment, normalize)
WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen. Source path = /datasets/Project_data/train ; batch size = 50 Image Indexes: [ 0 2 4 6 8 10 12 14 15 17 19 21 23 25 27 29] Epoch 1/100 14/14 [==============================] - ETA: 0s - loss: 1.3727 - categorical_accuracy: 0.4103Source path = /datasets/Project_data/val ; batch size = 50 Image Indexes: [ 0 2 4 6 8 10 12 14 15 17 19 21 23 25 27 29] Epoch 00001: val_categorical_accuracy improved from -inf to 0.23000, saving model to model_GRU_1_2021-05-2708_12_17.396060/model-00001-1.37270-0.41026-4.07666-0.23000.h5 14/14 [==============================] - 39s 3s/step - loss: 1.3727 - categorical_accuracy: 0.4103 - val_loss: 4.0767 - val_categorical_accuracy: 0.2300 Epoch 2/100 14/14 [==============================] - ETA: 0s - loss: 1.2104 - categorical_accuracy: 0.4977 Epoch 00002: val_categorical_accuracy improved from 0.23000 to 0.26000, saving model to model_GRU_1_2021-05-2708_12_17.396060/model-00002-1.21039-0.49774-2.76727-0.26000.h5 14/14 [==============================] - 40s 3s/step - loss: 1.2104 - categorical_accuracy: 0.4977 - val_loss: 2.7673 - val_categorical_accuracy: 0.2600 Epoch 3/100 14/14 [==============================] - ETA: 0s - loss: 1.0709 - categorical_accuracy: 0.5611 Epoch 00003: val_categorical_accuracy did not improve from 0.26000 14/14 [==============================] - 39s 3s/step - loss: 1.0709 - categorical_accuracy: 0.5611 - val_loss: 2.5087 - val_categorical_accuracy: 0.2400 Epoch 4/100 14/14 [==============================] - ETA: 0s - loss: 0.9489 - categorical_accuracy: 0.6275 Epoch 00004: val_categorical_accuracy improved from 0.26000 to 0.34000, saving model to model_GRU_1_2021-05-2708_12_17.396060/model-00004-0.94889-0.62745-2.25729-0.34000.h5 14/14 [==============================] - 39s 3s/step - loss: 0.9489 - categorical_accuracy: 0.6275 - val_loss: 2.2573 - val_categorical_accuracy: 0.3400 Epoch 
5/100 14/14 [==============================] - ETA: 0s - loss: 0.8132 - categorical_accuracy: 0.6833 Epoch 00005: val_categorical_accuracy did not improve from 0.34000 14/14 [==============================] - 39s 3s/step - loss: 0.8132 - categorical_accuracy: 0.6833 - val_loss: 2.2854 - val_categorical_accuracy: 0.2400 Epoch 6/100 14/14 [==============================] - ETA: 0s - loss: 0.7344 - categorical_accuracy: 0.7225 Epoch 00006: val_categorical_accuracy did not improve from 0.34000 14/14 [==============================] - 38s 3s/step - loss: 0.7344 - categorical_accuracy: 0.7225 - val_loss: 2.2780 - val_categorical_accuracy: 0.2000 Epoch 7/100 14/14 [==============================] - ETA: 0s - loss: 0.6440 - categorical_accuracy: 0.7436 Epoch 00007: val_categorical_accuracy did not improve from 0.34000 14/14 [==============================] - 38s 3s/step - loss: 0.6440 - categorical_accuracy: 0.7436 - val_loss: 2.3545 - val_categorical_accuracy: 0.2900 Epoch 8/100 14/14 [==============================] - ETA: 0s - loss: 0.5719 - categorical_accuracy: 0.7949 Epoch 00008: val_categorical_accuracy did not improve from 0.34000 14/14 [==============================] - 39s 3s/step - loss: 0.5719 - categorical_accuracy: 0.7949 - val_loss: 2.1001 - val_categorical_accuracy: 0.2900 Epoch 9/100 14/14 [==============================] - ETA: 0s - loss: 0.4816 - categorical_accuracy: 0.8341 Epoch 00009: val_categorical_accuracy improved from 0.34000 to 0.44000, saving model to model_GRU_1_2021-05-2708_12_17.396060/model-00009-0.48156-0.83409-1.70800-0.44000.h5 14/14 [==============================] - 37s 3s/step - loss: 0.4816 - categorical_accuracy: 0.8341 - val_loss: 1.7080 - val_categorical_accuracy: 0.4400 Epoch 10/100 14/14 [==============================] - ETA: 0s - loss: 0.3577 - categorical_accuracy: 0.8778 Epoch 00010: val_categorical_accuracy improved from 0.44000 to 0.48000, saving model to 
model_GRU_1_2021-05-2708_12_17.396060/model-00010-0.35770-0.87783-1.38309-0.48000.h5 14/14 [==============================] - 37s 3s/step - loss: 0.3577 - categorical_accuracy: 0.8778 - val_loss: 1.3831 - val_categorical_accuracy: 0.4800 Epoch 11/100 14/14 [==============================] - ETA: 0s - loss: 0.3275 - categorical_accuracy: 0.8929 Epoch 00011: val_categorical_accuracy improved from 0.48000 to 0.49000, saving model to model_GRU_1_2021-05-2708_12_17.396060/model-00011-0.32749-0.89291-1.51077-0.49000.h5 14/14 [==============================] - 37s 3s/step - loss: 0.3275 - categorical_accuracy: 0.8929 - val_loss: 1.5108 - val_categorical_accuracy: 0.4900 Epoch 12/100 14/14 [==============================] - ETA: 0s - loss: 0.4365 - categorical_accuracy: 0.8446 Epoch 00012: val_categorical_accuracy did not improve from 0.49000 14/14 [==============================] - 37s 3s/step - loss: 0.4365 - categorical_accuracy: 0.8446 - val_loss: 1.7926 - val_categorical_accuracy: 0.4600 Epoch 13/100 14/14 [==============================] - ETA: 0s - loss: 0.4046 - categorical_accuracy: 0.8537 Epoch 00013: val_categorical_accuracy improved from 0.49000 to 0.65000, saving model to model_GRU_1_2021-05-2708_12_17.396060/model-00013-0.40459-0.85370-1.05275-0.65000.h5 14/14 [==============================] - 37s 3s/step - loss: 0.4046 - categorical_accuracy: 0.8537 - val_loss: 1.0527 - val_categorical_accuracy: 0.6500 Epoch 14/100 14/14 [==============================] - ETA: 0s - loss: 0.2740 - categorical_accuracy: 0.9080 Epoch 00014: val_categorical_accuracy did not improve from 0.65000 14/14 [==============================] - 36s 3s/step - loss: 0.2740 - categorical_accuracy: 0.9080 - val_loss: 1.7437 - val_categorical_accuracy: 0.5100 Epoch 15/100 14/14 [==============================] - ETA: 0s - loss: 0.2455 - categorical_accuracy: 0.9201 Epoch 00015: val_categorical_accuracy improved from 0.65000 to 0.74000, saving model to 
model_GRU_1_2021-05-2708_12_17.396060/model-00015-0.24545-0.92006-0.84254-0.74000.h5 14/14 [==============================] - 35s 3s/step - loss: 0.2455 - categorical_accuracy: 0.9201 - val_loss: 0.8425 - val_categorical_accuracy: 0.7400 Epoch 16/100 14/14 [==============================] - ETA: 0s - loss: 0.1823 - categorical_accuracy: 0.9382 Epoch 00016: val_categorical_accuracy improved from 0.74000 to 0.76000, saving model to model_GRU_1_2021-05-2708_12_17.396060/model-00016-0.18234-0.93816-0.69848-0.76000.h5 14/14 [==============================] - 35s 3s/step - loss: 0.1823 - categorical_accuracy: 0.9382 - val_loss: 0.6985 - val_categorical_accuracy: 0.7600 Epoch 17/100 14/14 [==============================] - ETA: 0s - loss: 0.1537 - categorical_accuracy: 0.9472 Epoch 00017: val_categorical_accuracy improved from 0.76000 to 0.80000, saving model to model_GRU_1_2021-05-2708_12_17.396060/model-00017-0.15367-0.94721-0.71181-0.80000.h5 14/14 [==============================] - 36s 3s/step - loss: 0.1537 - categorical_accuracy: 0.9472 - val_loss: 0.7118 - val_categorical_accuracy: 0.8000 Epoch 18/100 14/14 [==============================] - ETA: 0s - loss: 0.1105 - categorical_accuracy: 0.9638 Epoch 00018: val_categorical_accuracy improved from 0.80000 to 0.83000, saving model to model_GRU_1_2021-05-2708_12_17.396060/model-00018-0.11049-0.96380-0.72830-0.83000.h5 14/14 [==============================] - 36s 3s/step - loss: 0.1105 - categorical_accuracy: 0.9638 - val_loss: 0.7283 - val_categorical_accuracy: 0.8300 Epoch 19/100 14/14 [==============================] - ETA: 0s - loss: 0.0727 - categorical_accuracy: 0.9804 Epoch 00019: val_categorical_accuracy did not improve from 0.83000 14/14 [==============================] - 36s 3s/step - loss: 0.0727 - categorical_accuracy: 0.9804 - val_loss: 0.5878 - val_categorical_accuracy: 0.8200 Epoch 20/100 14/14 [==============================] - ETA: 0s - loss: 0.0798 - categorical_accuracy: 0.9744 Epoch 00020: 
val_categorical_accuracy improved from 0.83000 to 0.85000, saving model to model_GRU_1_2021-05-2708_12_17.396060/model-00020-0.07981-0.97436-0.50849-0.85000.h5 14/14 [==============================] - 36s 3s/step - loss: 0.0798 - categorical_accuracy: 0.9744 - val_loss: 0.5085 - val_categorical_accuracy: 0.8500 Epoch 21/100 14/14 [==============================] - ETA: 0s - loss: 0.0662 - categorical_accuracy: 0.9834 Epoch 00021: val_categorical_accuracy did not improve from 0.85000 14/14 [==============================] - 36s 3s/step - loss: 0.0662 - categorical_accuracy: 0.9834 - val_loss: 0.6556 - val_categorical_accuracy: 0.8000 Epoch 22/100 14/14 [==============================] - ETA: 0s - loss: 0.0732 - categorical_accuracy: 0.9819 Epoch 00022: val_categorical_accuracy did not improve from 0.85000 14/14 [==============================] - 37s 3s/step - loss: 0.0732 - categorical_accuracy: 0.9819 - val_loss: 0.6147 - val_categorical_accuracy: 0.8100 Epoch 23/100 14/14 [==============================] - ETA: 0s - loss: 0.0893 - categorical_accuracy: 0.9698 Epoch 00023: val_categorical_accuracy did not improve from 0.85000 14/14 [==============================] - 36s 3s/step - loss: 0.0893 - categorical_accuracy: 0.9698 - val_loss: 0.5403 - val_categorical_accuracy: 0.8400 Epoch 24/100 14/14 [==============================] - ETA: 0s - loss: 0.1224 - categorical_accuracy: 0.9517 Epoch 00024: val_categorical_accuracy improved from 0.85000 to 0.86000, saving model to model_GRU_1_2021-05-2708_12_17.396060/model-00024-0.12237-0.95173-0.38660-0.86000.h5 14/14 [==============================] - 35s 3s/step - loss: 0.1224 - categorical_accuracy: 0.9517 - val_loss: 0.3866 - val_categorical_accuracy: 0.8600 Epoch 25/100 14/14 [==============================] - ETA: 0s - loss: 0.0744 - categorical_accuracy: 0.9759 Epoch 00025: val_categorical_accuracy did not improve from 0.86000 14/14 [==============================] - 35s 3s/step - loss: 0.0744 - categorical_accuracy: 
0.9759 - val_loss: 0.7393 - val_categorical_accuracy: 0.8000 Epoch 26/100 14/14 [==============================] - ETA: 0s - loss: 0.0528 - categorical_accuracy: 0.9849 Epoch 00026: val_categorical_accuracy did not improve from 0.86000 14/14 [==============================] - 35s 2s/step - loss: 0.0528 - categorical_accuracy: 0.9849 - val_loss: 0.6031 - val_categorical_accuracy: 0.8300 Epoch 27/100 14/14 [==============================] - ETA: 0s - loss: 0.0495 - categorical_accuracy: 0.9834 Epoch 00027: val_categorical_accuracy did not improve from 0.86000 14/14 [==============================] - 35s 3s/step - loss: 0.0495 - categorical_accuracy: 0.9834 - val_loss: 0.6325 - val_categorical_accuracy: 0.8300 Epoch 28/100 14/14 [==============================] - ETA: 0s - loss: 0.0358 - categorical_accuracy: 0.9910 Epoch 00028: val_categorical_accuracy did not improve from 0.86000 Epoch 00028: ReduceLROnPlateau reducing learning rate to 0.00015999999595806003. 14/14 [==============================] - 36s 3s/step - loss: 0.0358 - categorical_accuracy: 0.9910 - val_loss: 0.5083 - val_categorical_accuracy: 0.8600 Epoch 29/100 14/14 [==============================] - ETA: 0s - loss: 0.0179 - categorical_accuracy: 0.9970 Epoch 00029: val_categorical_accuracy did not improve from 0.86000 14/14 [==============================] - 35s 3s/step - loss: 0.0179 - categorical_accuracy: 0.9970 - val_loss: 0.5753 - val_categorical_accuracy: 0.8400 Epoch 30/100 14/14 [==============================] - ETA: 0s - loss: 0.0192 - categorical_accuracy: 0.9955 Epoch 00030: val_categorical_accuracy improved from 0.86000 to 0.90000, saving model to model_GRU_1_2021-05-2708_12_17.396060/model-00030-0.01918-0.99548-0.36819-0.90000.h5 14/14 [==============================] - 36s 3s/step - loss: 0.0192 - categorical_accuracy: 0.9955 - val_loss: 0.3682 - val_categorical_accuracy: 0.9000 Epoch 31/100 14/14 [==============================] - ETA: 0s - loss: 0.0145 - categorical_accuracy: 0.9955 
Epoch 00031: val_categorical_accuracy did not improve from 0.90000 14/14 [==============================] - 36s 3s/step - loss: 0.0145 - categorical_accuracy: 0.9955 - val_loss: 0.4571 - val_categorical_accuracy: 0.8800 Epoch 32/100 14/14 [==============================] - ETA: 0s - loss: 0.0069 - categorical_accuracy: 1.0000 Epoch 00032: val_categorical_accuracy did not improve from 0.90000 14/14 [==============================] - 36s 3s/step - loss: 0.0069 - categorical_accuracy: 1.0000 - val_loss: 0.3349 - val_categorical_accuracy: 0.9000 Epoch 33/100 14/14 [==============================] - ETA: 0s - loss: 0.0165 - categorical_accuracy: 0.9970 Epoch 00033: val_categorical_accuracy did not improve from 0.90000 14/14 [==============================] - 35s 3s/step - loss: 0.0165 - categorical_accuracy: 0.9970 - val_loss: 0.4338 - val_categorical_accuracy: 0.8900 Epoch 34/100 14/14 [==============================] - ETA: 0s - loss: 0.0152 - categorical_accuracy: 0.9970 Epoch 00034: val_categorical_accuracy did not improve from 0.90000 14/14 [==============================] - 36s 3s/step - loss: 0.0152 - categorical_accuracy: 0.9970 - val_loss: 0.3921 - val_categorical_accuracy: 0.8900 Epoch 35/100 14/14 [==============================] - ETA: 0s - loss: 0.0132 - categorical_accuracy: 0.9955 Epoch 00035: val_categorical_accuracy did not improve from 0.90000 14/14 [==============================] - 35s 3s/step - loss: 0.0132 - categorical_accuracy: 0.9955 - val_loss: 0.4109 - val_categorical_accuracy: 0.9000 Epoch 36/100 14/14 [==============================] - ETA: 0s - loss: 0.0102 - categorical_accuracy: 0.9985 Epoch 00036: val_categorical_accuracy did not improve from 0.90000 Epoch 00036: ReduceLROnPlateau reducing learning rate to 3.199999919161201e-05. 
14/14 [==============================] - 35s 3s/step - loss: 0.0102 - categorical_accuracy: 0.9985 - val_loss: 0.3605 - val_categorical_accuracy: 0.9000 Epoch 37/100 14/14 [==============================] - ETA: 0s - loss: 0.0081 - categorical_accuracy: 1.0000 Epoch 00037: val_categorical_accuracy did not improve from 0.90000 14/14 [==============================] - 35s 3s/step - loss: 0.0081 - categorical_accuracy: 1.0000 - val_loss: 0.3994 - val_categorical_accuracy: 0.9000 Epoch 38/100 14/14 [==============================] - ETA: 0s - loss: 0.0103 - categorical_accuracy: 0.9985 Epoch 00038: val_categorical_accuracy did not improve from 0.90000 14/14 [==============================] - 35s 3s/step - loss: 0.0103 - categorical_accuracy: 0.9985 - val_loss: 0.4190 - val_categorical_accuracy: 0.8900 Epoch 39/100 14/14 [==============================] - ETA: 0s - loss: 0.0078 - categorical_accuracy: 0.9985 Epoch 00039: val_categorical_accuracy did not improve from 0.90000 14/14 [==============================] - 35s 3s/step - loss: 0.0078 - categorical_accuracy: 0.9985 - val_loss: 0.4084 - val_categorical_accuracy: 0.8900 Epoch 40/100 14/14 [==============================] - ETA: 0s - loss: 0.0088 - categorical_accuracy: 1.0000 Epoch 00040: val_categorical_accuracy did not improve from 0.90000 Epoch 00040: ReduceLROnPlateau reducing learning rate to 6.399999983841554e-06. 
14/14 [==============================] - 35s 3s/step - loss: 0.0088 - categorical_accuracy: 1.0000 - val_loss: 0.4590 - val_categorical_accuracy: 0.8900 Epoch 41/100 14/14 [==============================] - ETA: 0s - loss: 0.0107 - categorical_accuracy: 0.9985 Epoch 00041: val_categorical_accuracy did not improve from 0.90000 14/14 [==============================] - 35s 3s/step - loss: 0.0107 - categorical_accuracy: 0.9985 - val_loss: 0.4070 - val_categorical_accuracy: 0.8900 Epoch 42/100 14/14 [==============================] - ETA: 0s - loss: 0.0094 - categorical_accuracy: 0.9985 Epoch 00042: val_categorical_accuracy improved from 0.90000 to 0.91000, saving model to model_GRU_1_2021-05-2708_12_17.396060/model-00042-0.00941-0.99849-0.30157-0.91000.h5 14/14 [==============================] - 35s 3s/step - loss: 0.0094 - categorical_accuracy: 0.9985 - val_loss: 0.3016 - val_categorical_accuracy: 0.9100 Epoch 43/100 14/14 [==============================] - ETA: 0s - loss: 0.0102 - categorical_accuracy: 0.9985 Epoch 00043: val_categorical_accuracy did not improve from 0.91000 14/14 [==============================] - 35s 3s/step - loss: 0.0102 - categorical_accuracy: 0.9985 - val_loss: 0.4062 - val_categorical_accuracy: 0.8900 Epoch 44/100 14/14 [==============================] - ETA: 0s - loss: 0.0126 - categorical_accuracy: 0.9985 Epoch 00044: val_categorical_accuracy did not improve from 0.91000 14/14 [==============================] - 35s 2s/step - loss: 0.0126 - categorical_accuracy: 0.9985 - val_loss: 0.4583 - val_categorical_accuracy: 0.8800 Epoch 45/100 14/14 [==============================] - ETA: 0s - loss: 0.0148 - categorical_accuracy: 0.9985 Epoch 00045: val_categorical_accuracy did not improve from 0.91000 14/14 [==============================] - 35s 3s/step - loss: 0.0148 - categorical_accuracy: 0.9985 - val_loss: 0.4035 - val_categorical_accuracy: 0.8900 Epoch 46/100 14/14 [==============================] - ETA: 0s - loss: 0.0071 - categorical_accuracy: 
1.0000 Epoch 00046: val_categorical_accuracy did not improve from 0.91000 Epoch 00046: ReduceLROnPlateau reducing learning rate to 1.2800000149582048e-06. 14/14 [==============================] - 35s 3s/step - loss: 0.0071 - categorical_accuracy: 1.0000 - val_loss: 0.3924 - val_categorical_accuracy: 0.8900 Epoch 47/100 14/14 [==============================] - ETA: 0s - loss: 0.0098 - categorical_accuracy: 0.9985 Epoch 00047: val_categorical_accuracy did not improve from 0.91000 14/14 [==============================] - 35s 3s/step - loss: 0.0098 - categorical_accuracy: 0.9985 - val_loss: 0.4026 - val_categorical_accuracy: 0.8800 Epoch 48/100 14/14 [==============================] - ETA: 0s - loss: 0.0095 - categorical_accuracy: 0.9985 Epoch 00048: val_categorical_accuracy did not improve from 0.91000 14/14 [==============================] - 35s 3s/step - loss: 0.0095 - categorical_accuracy: 0.9985 - val_loss: 0.4505 - val_categorical_accuracy: 0.8700 Epoch 49/100 14/14 [==============================] - ETA: 0s - loss: 0.0102 - categorical_accuracy: 0.9985 Epoch 00049: val_categorical_accuracy did not improve from 0.91000 14/14 [==============================] - 36s 3s/step - loss: 0.0102 - categorical_accuracy: 0.9985 - val_loss: 0.4004 - val_categorical_accuracy: 0.8800 Epoch 50/100 14/14 [==============================] - ETA: 0s - loss: 0.0080 - categorical_accuracy: 1.0000 Epoch 00050: val_categorical_accuracy did not improve from 0.91000 14/14 [==============================] - 35s 3s/step - loss: 0.0080 - categorical_accuracy: 1.0000 - val_loss: 0.2545 - val_categorical_accuracy: 0.9100 Epoch 51/100 14/14 [==============================] - ETA: 0s - loss: 0.0123 - categorical_accuracy: 0.9985 Epoch 00051: val_categorical_accuracy did not improve from 0.91000 14/14 [==============================] - 35s 3s/step - loss: 0.0123 - categorical_accuracy: 0.9985 - val_loss: 0.3982 - val_categorical_accuracy: 0.8800 Epoch 52/100 14/14 [==============================] 
- ETA: 0s - loss: 0.0074 - categorical_accuracy: 1.0000 Epoch 00052: val_categorical_accuracy did not improve from 0.91000 14/14 [==============================] - 36s 3s/step - loss: 0.0074 - categorical_accuracy: 1.0000 - val_loss: 0.4039 - val_categorical_accuracy: 0.8500 Epoch 53/100 14/14 [==============================] - ETA: 0s - loss: 0.0091 - categorical_accuracy: 0.9970 Epoch 00053: val_categorical_accuracy did not improve from 0.91000 14/14 [==============================] - 36s 3s/step - loss: 0.0091 - categorical_accuracy: 0.9970 - val_loss: 0.3966 - val_categorical_accuracy: 0.8800 Epoch 54/100 14/14 [==============================] - ETA: 0s - loss: 0.0109 - categorical_accuracy: 0.9985 Epoch 00054: val_categorical_accuracy did not improve from 0.91000 Epoch 00054: ReduceLROnPlateau reducing learning rate to 2.5599999844416746e-07. 14/14 [==============================] - 35s 3s/step - loss: 0.0109 - categorical_accuracy: 0.9985 - val_loss: 0.5221 - val_categorical_accuracy: 0.8600 Epoch 55/100 14/14 [==============================] - ETA: 0s - loss: 0.0072 - categorical_accuracy: 1.0000 Epoch 00055: val_categorical_accuracy did not improve from 0.91000 14/14 [==============================] - 35s 2s/step - loss: 0.0072 - categorical_accuracy: 1.0000 - val_loss: 0.3951 - val_categorical_accuracy: 0.8900 Epoch 56/100 14/14 [==============================] - ETA: 0s - loss: 0.0164 - categorical_accuracy: 0.9955 Epoch 00056: val_categorical_accuracy did not improve from 0.91000 14/14 [==============================] - 35s 3s/step - loss: 0.0164 - categorical_accuracy: 0.9955 - val_loss: 0.4027 - val_categorical_accuracy: 0.9000 Epoch 57/100 14/14 [==============================] - ETA: 0s - loss: 0.0075 - categorical_accuracy: 1.0000 Epoch 00057: val_categorical_accuracy did not improve from 0.91000 14/14 [==============================] - 35s 3s/step - loss: 0.0075 - categorical_accuracy: 1.0000 - val_loss: 0.3933 - val_categorical_accuracy: 0.8900 
Epoch 58/100 14/14 [==============================] - ETA: 0s - loss: 0.0116 - categorical_accuracy: 0.9985 Epoch 00058: val_categorical_accuracy did not improve from 0.91000 Epoch 00058: ReduceLROnPlateau reducing learning rate to 5.119999855196511e-08. 14/14 [==============================] - 36s 3s/step - loss: 0.0116 - categorical_accuracy: 0.9985 - val_loss: 0.4598 - val_categorical_accuracy: 0.8800 Epoch 59/100 14/14 [==============================] - ETA: 0s - loss: 0.0115 - categorical_accuracy: 0.9970 Epoch 00059: val_categorical_accuracy did not improve from 0.91000 14/14 [==============================] - 35s 3s/step - loss: 0.0115 - categorical_accuracy: 0.9970 - val_loss: 0.3914 - val_categorical_accuracy: 0.8900 Epoch 60/100 14/14 [==============================] - ETA: 0s - loss: 0.0104 - categorical_accuracy: 0.9985 Epoch 00060: val_categorical_accuracy did not improve from 0.91000 14/14 [==============================] - 36s 3s/step - loss: 0.0104 - categorical_accuracy: 0.9985 - val_loss: 0.3284 - val_categorical_accuracy: 0.9000 Epoch 00060: early stopping
# Plot training/validation loss and accuracy curves for the GRU run
# (modelplot is a helper defined earlier in this notebook).
modelplot(model_history)
# MobileNet GRU Model
# Pre-trained MobileNet backbone (ImageNet weights, classifier head removed),
# created once at module level so the weights are downloaded only once.
mobilenet_transfer = MobileNet(weights='imagenet', include_top=False)

def model_mobilenet_1(num_frames, frame_height, frame_width, num_class=5):
    """Build a transfer-learning gesture classifier.

    MobileNet extracts features from each frame independently (via
    TimeDistributed), then a GRU models the temporal sequence of frame
    features before a softmax classification head.

    Parameters
    ----------
    num_frames : int
        Number of frames sampled from each gesture video.
    frame_height, frame_width : int
        Spatial size of each RGB frame.
    num_class : int, optional
        Number of output gesture classes (default 5).

    Returns
    -------
    keras.Sequential
        Uncompiled model producing a softmax over ``num_class`` classes.
    """
    model = Sequential()
    # Apply the 2D CNN backbone to every frame in the clip.
    model.add(TimeDistributed(mobilenet_transfer,
                              input_shape=(num_frames, frame_height, frame_width, 3)))
    model.add(TimeDistributed(BatchNormalization()))
    model.add(TimeDistributed(MaxPooling2D((2, 2))))
    model.add(TimeDistributed(Flatten()))
    # NOTE: recurrent_dropout > 0 disables the cuDNN GRU kernel, so this
    # layer falls back to the (slower) generic GPU kernel — see the
    # "will not use cuDNN kernel" warning in the training log.
    model.add(GRU(units=64, return_sequences=False, activation='tanh',
                  dropout=0.5, recurrent_dropout=0.5))
    model.add(Dropout(0.25))
    model.add(Dense(64, activation='relu'))
    model.add(Dropout(0.25))
    model.add(Dense(num_class, activation='softmax'))
    return model
WARNING:tensorflow:`input_shape` is undefined or non-square, or `rows` is not in [128, 160, 192, 224]. Weights for input shape (224, 224) will be loaded as the default. Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/mobilenet/mobilenet_1_0_224_tf_no_top.h5 17227776/17225924 [==============================] - 1s 0us/step
# Model 14 - MobileNet GRU
# Hyperparameters for this experiment (read again by the trainer call below).
num_frames = 20      # frames sampled per video
frame_height = 120   # resized frame height in pixels
frame_width = 120    # resized frame width in pixels
size_batch = 40      # videos per training batch
num_epochs = 100     # upper bound; EarlyStopping may end training sooner
augment = False      # no data augmentation for this run
normalize = True     # per-frame normalization enabled
model = model_mobilenet_1(num_frames, frame_height, frame_width)
optimiser = optimizers.Adam(learning_rate=0.00075)
model.compile(optimizer = optimiser, loss = 'categorical_crossentropy', metrics = ['categorical_accuracy'])
print(model.summary())
WARNING:tensorflow:Layer gru_2 will not use cuDNN kernel since it doesn't meet the cuDNN kernel criteria. It will use generic GPU kernel as fallback when running on GPU Model: "sequential_15" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= time_distributed_34 (TimeDis (None, 20, 3, 3, 1024) 3228864 _________________________________________________________________ time_distributed_35 (TimeDis (None, 20, 3, 3, 1024) 4096 _________________________________________________________________ time_distributed_36 (TimeDis (None, 20, 1, 1, 1024) 0 _________________________________________________________________ time_distributed_37 (TimeDis (None, 20, 1024) 0 _________________________________________________________________ gru_2 (GRU) (None, 64) 209280 _________________________________________________________________ dropout_35 (Dropout) (None, 64) 0 _________________________________________________________________ dense_36 (Dense) (None, 64) 4160 _________________________________________________________________ dropout_36 (Dropout) (None, 64) 0 _________________________________________________________________ dense_37 (Dense) (None, 5) 325 ================================================================= Total params: 3,446,725 Trainable params: 3,422,789 Non-trainable params: 23,936 _________________________________________________________________ None
# Run the model and check accuracy
# trainer() is defined earlier in this notebook; presumably it builds the
# generators, attaches the checkpoint/LR callbacks and calls fit — verify
# against its definition. Returns the Keras History object.
model_history = trainer(model, 'model_mobilenet_1', num_epochs, size_batch, num_frames, frame_height, frame_width, augment, normalize)
WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen. Source path = /datasets/Project_data/train ; batch size = 40 Image Indexes: [ 0 2 3 5 6 8 9 11 12 14 15 17 18 20 21 23 24 26 27 29] Epoch 1/100 2/17 [==>...........................] - ETA: 6s - loss: 1.9918 - categorical_accuracy: 0.1750WARNING:tensorflow:Callbacks method `on_train_batch_end` is slow compared to the batch time (batch time: 0.3285s vs `on_train_batch_end` time: 0.5255s). Check your callbacks. 17/17 [==============================] - ETA: 0s - loss: 1.5029 - categorical_accuracy: 0.3786Source path = /datasets/Project_data/val ; batch size = 40 Image Indexes: [ 0 2 3 5 6 8 9 11 12 14 15 17 18 20 21 23 24 26 27 29] Epoch 00001: val_categorical_accuracy improved from -inf to 0.63000, saving model to model_mobilenet_1_2021-05-2708_48_52.111646/model-00001-1.50286-0.37858-1.17717-0.63000.h5 17/17 [==============================] - 47s 3s/step - loss: 1.5029 - categorical_accuracy: 0.3786 - val_loss: 1.1772 - val_categorical_accuracy: 0.6300 Epoch 2/100 17/17 [==============================] - ETA: 0s - loss: 0.7594 - categorical_accuracy: 0.7526 Epoch 00002: val_categorical_accuracy improved from 0.63000 to 0.70000, saving model to model_mobilenet_1_2021-05-2708_48_52.111646/model-00002-0.75936-0.75264-0.87362-0.70000.h5 17/17 [==============================] - 46s 3s/step - loss: 0.7594 - categorical_accuracy: 0.7526 - val_loss: 0.8736 - val_categorical_accuracy: 0.7000 Epoch 3/100 17/17 [==============================] - ETA: 0s - loss: 0.3505 - categorical_accuracy: 0.9020 Epoch 00003: val_categorical_accuracy improved from 0.70000 to 0.81000, saving model to model_mobilenet_1_2021-05-2708_48_52.111646/model-00003-0.35048-0.90196-0.52509-0.81000.h5 17/17 [==============================] - 46s 3s/step - loss: 0.3505 - categorical_accuracy: 0.9020 - val_loss: 0.5251 - val_categorical_accuracy: 0.8100 Epoch 4/100 17/17 
[==============================] - ETA: 0s - loss: 0.1598 - categorical_accuracy: 0.9653 Epoch 00004: val_categorical_accuracy improved from 0.81000 to 0.93000, saving model to model_mobilenet_1_2021-05-2708_48_52.111646/model-00004-0.15982-0.96531-0.25381-0.93000.h5 17/17 [==============================] - 48s 3s/step - loss: 0.1598 - categorical_accuracy: 0.9653 - val_loss: 0.2538 - val_categorical_accuracy: 0.9300 Epoch 5/100 17/17 [==============================] - ETA: 0s - loss: 0.0865 - categorical_accuracy: 0.9834 Epoch 00005: val_categorical_accuracy improved from 0.93000 to 0.99000, saving model to model_mobilenet_1_2021-05-2708_48_52.111646/model-00005-0.08655-0.98341-0.12960-0.99000.h5 17/17 [==============================] - 46s 3s/step - loss: 0.0865 - categorical_accuracy: 0.9834 - val_loss: 0.1296 - val_categorical_accuracy: 0.9900 Epoch 6/100 17/17 [==============================] - ETA: 0s - loss: 0.0516 - categorical_accuracy: 0.9910 Epoch 00006: val_categorical_accuracy did not improve from 0.99000 17/17 [==============================] - 46s 3s/step - loss: 0.0516 - categorical_accuracy: 0.9910 - val_loss: 0.1006 - val_categorical_accuracy: 0.9600 Epoch 7/100 17/17 [==============================] - ETA: 0s - loss: 0.0303 - categorical_accuracy: 0.9955 Epoch 00007: val_categorical_accuracy did not improve from 0.99000 17/17 [==============================] - 47s 3s/step - loss: 0.0303 - categorical_accuracy: 0.9955 - val_loss: 0.0549 - val_categorical_accuracy: 0.9900 Epoch 8/100 17/17 [==============================] - ETA: 0s - loss: 0.0300 - categorical_accuracy: 0.9955 Epoch 00008: val_categorical_accuracy did not improve from 0.99000 17/17 [==============================] - 47s 3s/step - loss: 0.0300 - categorical_accuracy: 0.9955 - val_loss: 0.0275 - val_categorical_accuracy: 0.9900 Epoch 9/100 17/17 [==============================] - ETA: 0s - loss: 0.0183 - categorical_accuracy: 0.9985 Epoch 00009: val_categorical_accuracy did not 
improve from 0.99000 17/17 [==============================] - 45s 3s/step - loss: 0.0183 - categorical_accuracy: 0.9985 - val_loss: 0.0591 - val_categorical_accuracy: 0.9700 Epoch 10/100 17/17 [==============================] - ETA: 0s - loss: 0.0171 - categorical_accuracy: 0.9985 Epoch 00010: val_categorical_accuracy did not improve from 0.99000 17/17 [==============================] - 45s 3s/step - loss: 0.0171 - categorical_accuracy: 0.9985 - val_loss: 0.0393 - val_categorical_accuracy: 0.9900 Epoch 11/100 17/17 [==============================] - ETA: 0s - loss: 0.0098 - categorical_accuracy: 1.0000 Epoch 00011: val_categorical_accuracy did not improve from 0.99000 17/17 [==============================] - 45s 3s/step - loss: 0.0098 - categorical_accuracy: 1.0000 - val_loss: 0.0824 - val_categorical_accuracy: 0.9800 Epoch 12/100 17/17 [==============================] - ETA: 0s - loss: 0.0086 - categorical_accuracy: 1.0000 Epoch 00012: val_categorical_accuracy did not improve from 0.99000 Epoch 00012: ReduceLROnPlateau reducing learning rate to 0.00015000000130385163. 
17/17 [==============================] - 44s 3s/step - loss: 0.0086 - categorical_accuracy: 1.0000 - val_loss: 0.0362 - val_categorical_accuracy: 0.9900 Epoch 13/100 17/17 [==============================] - ETA: 0s - loss: 0.0097 - categorical_accuracy: 1.0000 Epoch 00013: val_categorical_accuracy did not improve from 0.99000 17/17 [==============================] - 47s 3s/step - loss: 0.0097 - categorical_accuracy: 1.0000 - val_loss: 0.0403 - val_categorical_accuracy: 0.9900 Epoch 14/100 17/17 [==============================] - ETA: 0s - loss: 0.0075 - categorical_accuracy: 1.0000 Epoch 00014: val_categorical_accuracy did not improve from 0.99000 17/17 [==============================] - 47s 3s/step - loss: 0.0075 - categorical_accuracy: 1.0000 - val_loss: 0.0420 - val_categorical_accuracy: 0.9900 Epoch 15/100 17/17 [==============================] - ETA: 0s - loss: 0.0072 - categorical_accuracy: 1.0000 Epoch 00015: val_categorical_accuracy improved from 0.99000 to 1.00000, saving model to model_mobilenet_1_2021-05-2708_48_52.111646/model-00015-0.00722-1.00000-0.01149-1.00000.h5 17/17 [==============================] - 47s 3s/step - loss: 0.0072 - categorical_accuracy: 1.0000 - val_loss: 0.0115 - val_categorical_accuracy: 1.0000 Epoch 16/100 17/17 [==============================] - ETA: 0s - loss: 0.0062 - categorical_accuracy: 1.0000 Epoch 00016: val_categorical_accuracy did not improve from 1.00000 17/17 [==============================] - 48s 3s/step - loss: 0.0062 - categorical_accuracy: 1.0000 - val_loss: 0.0394 - val_categorical_accuracy: 0.9800 Epoch 17/100 17/17 [==============================] - ETA: 0s - loss: 0.0065 - categorical_accuracy: 1.0000 Epoch 00017: val_categorical_accuracy did not improve from 1.00000 17/17 [==============================] - 47s 3s/step - loss: 0.0065 - categorical_accuracy: 1.0000 - val_loss: 0.0127 - val_categorical_accuracy: 0.9900 Epoch 18/100 17/17 [==============================] - ETA: 0s - loss: 0.0072 - 
categorical_accuracy: 1.0000 Epoch 00018: val_categorical_accuracy did not improve from 1.00000 17/17 [==============================] - 46s 3s/step - loss: 0.0072 - categorical_accuracy: 1.0000 - val_loss: 0.0411 - val_categorical_accuracy: 0.9800 Epoch 19/100 17/17 [==============================] - ETA: 0s - loss: 0.0061 - categorical_accuracy: 1.0000 Epoch 00019: val_categorical_accuracy did not improve from 1.00000 Epoch 00019: ReduceLROnPlateau reducing learning rate to 3.000000142492354e-05. 17/17 [==============================] - 47s 3s/step - loss: 0.0061 - categorical_accuracy: 1.0000 - val_loss: 0.0421 - val_categorical_accuracy: 0.9800 Epoch 20/100 17/17 [==============================] - ETA: 0s - loss: 0.0065 - categorical_accuracy: 1.0000 Epoch 00020: val_categorical_accuracy did not improve from 1.00000 17/17 [==============================] - 47s 3s/step - loss: 0.0065 - categorical_accuracy: 1.0000 - val_loss: 0.0489 - val_categorical_accuracy: 0.9700 Epoch 21/100 17/17 [==============================] - ETA: 0s - loss: 0.0065 - categorical_accuracy: 1.0000 Epoch 00021: val_categorical_accuracy did not improve from 1.00000 17/17 [==============================] - 46s 3s/step - loss: 0.0065 - categorical_accuracy: 1.0000 - val_loss: 0.0464 - val_categorical_accuracy: 0.9800 Epoch 22/100 17/17 [==============================] - ETA: 0s - loss: 0.0051 - categorical_accuracy: 1.0000 Epoch 00022: val_categorical_accuracy did not improve from 1.00000 17/17 [==============================] - 46s 3s/step - loss: 0.0051 - categorical_accuracy: 1.0000 - val_loss: 0.0444 - val_categorical_accuracy: 0.9800 Epoch 23/100 17/17 [==============================] - ETA: 0s - loss: 0.0062 - categorical_accuracy: 1.0000 Epoch 00023: val_categorical_accuracy did not improve from 1.00000 Epoch 00023: ReduceLROnPlateau reducing learning rate to 6.000000212225132e-06. 
17/17 [==============================] - 45s 3s/step - loss: 0.0062 - categorical_accuracy: 1.0000 - val_loss: 0.0440 - val_categorical_accuracy: 0.9800 Epoch 24/100 17/17 [==============================] - ETA: 0s - loss: 0.0063 - categorical_accuracy: 1.0000 Epoch 00024: val_categorical_accuracy did not improve from 1.00000 17/17 [==============================] - 43s 3s/step - loss: 0.0063 - categorical_accuracy: 1.0000 - val_loss: 0.0465 - val_categorical_accuracy: 0.9800 Epoch 25/100 17/17 [==============================] - ETA: 0s - loss: 0.0048 - categorical_accuracy: 1.0000 Epoch 00025: val_categorical_accuracy did not improve from 1.00000 17/17 [==============================] - 44s 3s/step - loss: 0.0048 - categorical_accuracy: 1.0000 - val_loss: 0.0475 - val_categorical_accuracy: 0.9800 Epoch 00025: early stopping
# Plot training/validation loss and accuracy curves for the MobileNet-GRU run
# (modelplot is a helper defined earlier in this notebook).
modelplot(model_history)
# VGG16 transfer-learning model with stacked GRUs
def model_VGG16_TF_GRU(num_frames, frame_height, frame_width, num_class=5):
    """Build a gesture classifier from a partially fine-tuned VGG16 backbone.

    VGG16 (ImageNet weights, no top) + GlobalAveragePooling2D is applied
    per frame via TimeDistributed; two stacked GRUs model the temporal
    sequence, ending in a softmax head.

    Parameters
    ----------
    num_frames : int
        Number of frames sampled from each gesture video.
    frame_height, frame_width : int
        Spatial size of each RGB frame.
    num_class : int, optional
        Number of output gesture classes (default 5).

    Returns
    -------
    keras.Model
        Uncompiled functional model producing a softmax over ``num_class``.
    """
    def create_base():
        # Per-frame feature extractor: VGG16 conv stack + global average pool.
        conv_base = VGG16(weights='imagenet',
                          include_top=False,
                          input_shape=(frame_height, frame_width, 3))
        print("num of layers in VGG16 " + str(len(conv_base.layers)))
        # Freeze layers [0, split_at) and fine-tune layers [split_at:] —
        # i.e. only the last conv layer + pooling of VGG16's 19 layers train.
        split_at = 17
        for layer in conv_base.layers[:split_at]:
            layer.trainable = False
        for layer in conv_base.layers[split_at:]:
            layer.trainable = True
        x = GlobalAveragePooling2D()(conv_base.output)
        return Model(conv_base.input, x)

    conv_base = create_base()
    ip = Input(shape=(num_frames, frame_height, frame_width, 3))
    t_conv = TimeDistributed(conv_base)(ip)
    drop_l = Dropout(0.65)(t_conv)
    # NOTE: recurrent_dropout > 0 disables the cuDNN GRU kernel (slower
    # generic GPU kernel is used instead — see training-log warning).
    t_GRU_1 = GRU(units=128, return_sequences=True, activation='tanh',
                  dropout=0.6, recurrent_dropout=0.6)(drop_l)
    t_GRU_2 = GRU(units=32, return_sequences=False, activation='tanh',
                  dropout=0.5, recurrent_dropout=0.5)(t_GRU_1)
    f_softmax = Dense(num_class, activation='softmax')(t_GRU_2)
    return Model(ip, f_softmax)
# Model 15 - VGG16 transfer Learning Model with GRU
# Hyperparameters for this experiment (read again by the trainer call below).
num_frames = 18      # frames sampled per video
frame_height = 120   # resized frame height in pixels
frame_width = 120    # resized frame width in pixels
size_batch = 50      # videos per training batch
num_epochs = 100     # upper bound; EarlyStopping may end training sooner
augment = False      # no data augmentation for this run
normalize = True     # per-frame normalization enabled
model = model_VGG16_TF_GRU(num_frames, frame_height, frame_width)
optimiser = optimizers.Adam(learning_rate=0.00075)
model.compile(optimizer = optimiser, loss = 'categorical_crossentropy', metrics = ['categorical_accuracy'])
print(model.summary())
Downloading data from https://storage.googleapis.com/tensorflow/keras-applications/vgg16/vgg16_weights_tf_dim_ordering_tf_kernels_notop.h5 58892288/58889256 [==============================] - 5s 0us/step num of layers in VGG16 19 WARNING:tensorflow:Layer gru_3 will not use cuDNN kernel since it doesn't meet the cuDNN kernel criteria. It will use generic GPU kernel as fallback when running on GPU WARNING:tensorflow:Layer gru_4 will not use cuDNN kernel since it doesn't meet the cuDNN kernel criteria. It will use generic GPU kernel as fallback when running on GPU Model: "functional_3" _________________________________________________________________ Layer (type) Output Shape Param # ================================================================= input_3 (InputLayer) [(None, 18, 120, 120, 3)] 0 _________________________________________________________________ time_distributed_38 (TimeDis (None, 18, 512) 14714688 _________________________________________________________________ dropout_37 (Dropout) (None, 18, 512) 0 _________________________________________________________________ gru_3 (GRU) (None, 18, 128) 246528 _________________________________________________________________ gru_4 (GRU) (None, 32) 15552 _________________________________________________________________ dense_38 (Dense) (None, 5) 165 ================================================================= Total params: 14,976,933 Trainable params: 2,622,053 Non-trainable params: 12,354,880 _________________________________________________________________ None
# Run the model and check accuracy
# trainer() is defined earlier in this notebook; presumably it builds the
# generators, attaches the checkpoint/LR callbacks and calls fit — verify
# against its definition. Returns the Keras History object.
model_history = trainer(model, 'model_VGG16_TF_GRU', num_epochs, size_batch, num_frames, frame_height, frame_width, augment, normalize)
WARNING:tensorflow:`period` argument is deprecated. Please use `save_freq` to specify the frequency in number of batches seen. Source path = /datasets/Project_data/train ; batch size = 50 Image Indexes: [ 0 2 3 5 7 9 10 12 14 15 17 19 20 22 24 26 27 29] Epoch 1/100 14/14 [==============================] - ETA: 0s - loss: 1.6686 - categorical_accuracy: 0.2247Source path = /datasets/Project_data/val ; batch size = 50 Image Indexes: [ 0 2 3 5 7 9 10 12 14 15 17 19 20 22 24 26 27 29] Epoch 00001: val_categorical_accuracy improved from -inf to 0.34000, saving model to model_VGG16_TF_GRU_2021-05-2709_08_44.142931/model-00001-1.66859-0.22474-1.45487-0.34000.h5 14/14 [==============================] - 59s 4s/step - loss: 1.6686 - categorical_accuracy: 0.2247 - val_loss: 1.4549 - val_categorical_accuracy: 0.3400 Epoch 2/100 14/14 [==============================] - ETA: 0s - loss: 1.6410 - categorical_accuracy: 0.2609 Epoch 00002: val_categorical_accuracy improved from 0.34000 to 0.52000, saving model to model_VGG16_TF_GRU_2021-05-2709_08_44.142931/model-00002-1.64098-0.26094-1.41310-0.52000.h5 14/14 [==============================] - 43s 3s/step - loss: 1.6410 - categorical_accuracy: 0.2609 - val_loss: 1.4131 - val_categorical_accuracy: 0.5200 Epoch 3/100 14/14 [==============================] - ETA: 0s - loss: 1.6137 - categorical_accuracy: 0.2745 Epoch 00003: val_categorical_accuracy did not improve from 0.52000 14/14 [==============================] - 40s 3s/step - loss: 1.6137 - categorical_accuracy: 0.2745 - val_loss: 1.4858 - val_categorical_accuracy: 0.4800 Epoch 4/100 14/14 [==============================] - ETA: 0s - loss: 1.6269 - categorical_accuracy: 0.2836 Epoch 00004: val_categorical_accuracy did not improve from 0.52000 14/14 [==============================] - 41s 3s/step - loss: 1.6269 - categorical_accuracy: 0.2836 - val_loss: 1.4316 - val_categorical_accuracy: 0.3900 Epoch 5/100 14/14 [==============================] - ETA: 0s - loss: 1.5561 - 
categorical_accuracy: 0.3137 Epoch 00005: val_categorical_accuracy did not improve from 0.52000 14/14 [==============================] - 41s 3s/step - loss: 1.5561 - categorical_accuracy: 0.3137 - val_loss: 1.3687 - val_categorical_accuracy: 0.4600 Epoch 6/100 14/14 [==============================] - ETA: 0s - loss: 1.5215 - categorical_accuracy: 0.3167 Epoch 00006: val_categorical_accuracy did not improve from 0.52000 14/14 [==============================] - 40s 3s/step - loss: 1.5215 - categorical_accuracy: 0.3167 - val_loss: 1.2819 - val_categorical_accuracy: 0.4100 Epoch 7/100 14/14 [==============================] - ETA: 0s - loss: 1.5118 - categorical_accuracy: 0.3228 Epoch 00007: val_categorical_accuracy improved from 0.52000 to 0.57000, saving model to model_VGG16_TF_GRU_2021-05-2709_08_44.142931/model-00007-1.51178-0.32278-1.15510-0.57000.h5 14/14 [==============================] - 41s 3s/step - loss: 1.5118 - categorical_accuracy: 0.3228 - val_loss: 1.1551 - val_categorical_accuracy: 0.5700 Epoch 8/100 14/14 [==============================] - ETA: 0s - loss: 1.4525 - categorical_accuracy: 0.3756 Epoch 00008: val_categorical_accuracy improved from 0.57000 to 0.63000, saving model to model_VGG16_TF_GRU_2021-05-2709_08_44.142931/model-00008-1.45252-0.37557-1.06394-0.63000.h5 14/14 [==============================] - 41s 3s/step - loss: 1.4525 - categorical_accuracy: 0.3756 - val_loss: 1.0639 - val_categorical_accuracy: 0.6300 Epoch 9/100 14/14 [==============================] - ETA: 0s - loss: 1.3681 - categorical_accuracy: 0.4314 Epoch 00009: val_categorical_accuracy did not improve from 0.63000 14/14 [==============================] - 43s 3s/step - loss: 1.3681 - categorical_accuracy: 0.4314 - val_loss: 1.1611 - val_categorical_accuracy: 0.5500 Epoch 10/100 14/14 [==============================] - ETA: 0s - loss: 1.3206 - categorical_accuracy: 0.4691 Epoch 00010: val_categorical_accuracy improved from 0.63000 to 0.70000, saving model to 
model_VGG16_TF_GRU_2021-05-2709_08_44.142931/model-00010-1.32058-0.46908-0.96274-0.70000.h5 14/14 [==============================] - 43s 3s/step - loss: 1.3206 - categorical_accuracy: 0.4691 - val_loss: 0.9627 - val_categorical_accuracy: 0.7000 Epoch 11/100 14/14 [==============================] - ETA: 0s - loss: 1.2316 - categorical_accuracy: 0.4992 Epoch 00011: val_categorical_accuracy did not improve from 0.70000 14/14 [==============================] - 42s 3s/step - loss: 1.2316 - categorical_accuracy: 0.4992 - val_loss: 1.0641 - val_categorical_accuracy: 0.6500 Epoch 12/100 14/14 [==============================] - ETA: 0s - loss: 1.1188 - categorical_accuracy: 0.5807 Epoch 00012: val_categorical_accuracy improved from 0.70000 to 0.72000, saving model to model_VGG16_TF_GRU_2021-05-2709_08_44.142931/model-00012-1.11881-0.58069-0.83008-0.72000.h5 14/14 [==============================] - 43s 3s/step - loss: 1.1188 - categorical_accuracy: 0.5807 - val_loss: 0.8301 - val_categorical_accuracy: 0.7200 Epoch 13/100 14/14 [==============================] - ETA: 0s - loss: 0.9971 - categorical_accuracy: 0.6290 Epoch 00013: val_categorical_accuracy did not improve from 0.72000 14/14 [==============================] - 43s 3s/step - loss: 0.9971 - categorical_accuracy: 0.6290 - val_loss: 0.7992 - val_categorical_accuracy: 0.7000 Epoch 14/100 14/14 [==============================] - ETA: 0s - loss: 0.8843 - categorical_accuracy: 0.7059 Epoch 00014: val_categorical_accuracy improved from 0.72000 to 0.73000, saving model to model_VGG16_TF_GRU_2021-05-2709_08_44.142931/model-00014-0.88429-0.70588-0.73810-0.73000.h5 14/14 [==============================] - 42s 3s/step - loss: 0.8843 - categorical_accuracy: 0.7059 - val_loss: 0.7381 - val_categorical_accuracy: 0.7300 Epoch 15/100 14/14 [==============================] - ETA: 0s - loss: 0.7102 - categorical_accuracy: 0.7572 Epoch 00015: val_categorical_accuracy improved from 0.73000 to 0.74000, saving model to 
model_VGG16_TF_GRU_2021-05-2709_08_44.142931/model-00015-0.71022-0.75716-0.67019-0.74000.h5 14/14 [==============================] - 43s 3s/step - loss: 0.7102 - categorical_accuracy: 0.7572 - val_loss: 0.6702 - val_categorical_accuracy: 0.7400 Epoch 16/100 14/14 [==============================] - ETA: 0s - loss: 0.5708 - categorical_accuracy: 0.8130 Epoch 00016: val_categorical_accuracy improved from 0.74000 to 0.81000, saving model to model_VGG16_TF_GRU_2021-05-2709_08_44.142931/model-00016-0.57076-0.81297-0.46929-0.81000.h5 14/14 [==============================] - 43s 3s/step - loss: 0.5708 - categorical_accuracy: 0.8130 - val_loss: 0.4693 - val_categorical_accuracy: 0.8100 Epoch 17/100 14/14 [==============================] - ETA: 0s - loss: 0.4422 - categorical_accuracy: 0.8658 Epoch 00017: val_categorical_accuracy improved from 0.81000 to 0.84000, saving model to model_VGG16_TF_GRU_2021-05-2709_08_44.142931/model-00017-0.44216-0.86576-0.44511-0.84000.h5 14/14 [==============================] - 41s 3s/step - loss: 0.4422 - categorical_accuracy: 0.8658 - val_loss: 0.4451 - val_categorical_accuracy: 0.8400 Epoch 18/100 14/14 [==============================] - ETA: 0s - loss: 0.3647 - categorical_accuracy: 0.8884 Epoch 00018: val_categorical_accuracy improved from 0.84000 to 0.85000, saving model to model_VGG16_TF_GRU_2021-05-2709_08_44.142931/model-00018-0.36471-0.88839-0.40455-0.85000.h5 14/14 [==============================] - 41s 3s/step - loss: 0.3647 - categorical_accuracy: 0.8884 - val_loss: 0.4046 - val_categorical_accuracy: 0.8500 Epoch 19/100 14/14 [==============================] - ETA: 0s - loss: 0.2903 - categorical_accuracy: 0.9125 Epoch 00019: val_categorical_accuracy did not improve from 0.85000 14/14 [==============================] - 41s 3s/step - loss: 0.2903 - categorical_accuracy: 0.9125 - val_loss: 0.4061 - val_categorical_accuracy: 0.8300 Epoch 20/100 14/14 [==============================] - ETA: 0s - loss: 0.2486 - categorical_accuracy: 
0.9306 Epoch 00020: val_categorical_accuracy improved from 0.85000 to 0.88000, saving model to model_VGG16_TF_GRU_2021-05-2709_08_44.142931/model-00020-0.24864-0.93062-0.38642-0.88000.h5 14/14 [==============================] - 41s 3s/step - loss: 0.2486 - categorical_accuracy: 0.9306 - val_loss: 0.3864 - val_categorical_accuracy: 0.8800 Epoch 21/100 14/14 [==============================] - ETA: 0s - loss: 0.1964 - categorical_accuracy: 0.9578 Epoch 00021: val_categorical_accuracy improved from 0.88000 to 0.89000, saving model to model_VGG16_TF_GRU_2021-05-2709_08_44.142931/model-00021-0.19640-0.95777-0.40192-0.89000.h5 14/14 [==============================] - 42s 3s/step - loss: 0.1964 - categorical_accuracy: 0.9578 - val_loss: 0.4019 - val_categorical_accuracy: 0.8900 Epoch 22/100 14/14 [==============================] - ETA: 0s - loss: 0.1623 - categorical_accuracy: 0.9608 Epoch 00022: val_categorical_accuracy did not improve from 0.89000 14/14 [==============================] - 42s 3s/step - loss: 0.1623 - categorical_accuracy: 0.9608 - val_loss: 0.3356 - val_categorical_accuracy: 0.8700 Epoch 23/100 14/14 [==============================] - ETA: 0s - loss: 0.1436 - categorical_accuracy: 0.9683 Epoch 00023: val_categorical_accuracy did not improve from 0.89000 14/14 [==============================] - 42s 3s/step - loss: 0.1436 - categorical_accuracy: 0.9683 - val_loss: 0.3599 - val_categorical_accuracy: 0.8900 Epoch 24/100 14/14 [==============================] - ETA: 0s - loss: 0.1102 - categorical_accuracy: 0.9774 Epoch 00024: val_categorical_accuracy improved from 0.89000 to 0.92000, saving model to model_VGG16_TF_GRU_2021-05-2709_08_44.142931/model-00024-0.11021-0.97738-0.25395-0.92000.h5 14/14 [==============================] - 43s 3s/step - loss: 0.1102 - categorical_accuracy: 0.9774 - val_loss: 0.2539 - val_categorical_accuracy: 0.9200 Epoch 25/100 14/14 [==============================] - ETA: 0s - loss: 0.0971 - categorical_accuracy: 0.9759 Epoch 00025: 
val_categorical_accuracy did not improve from 0.92000 14/14 [==============================] - 43s 3s/step - loss: 0.0971 - categorical_accuracy: 0.9759 - val_loss: 0.4006 - val_categorical_accuracy: 0.9000 Epoch 26/100 14/14 [==============================] - ETA: 0s - loss: 0.0832 - categorical_accuracy: 0.9804 Epoch 00026: val_categorical_accuracy did not improve from 0.92000 14/14 [==============================] - 43s 3s/step - loss: 0.0832 - categorical_accuracy: 0.9804 - val_loss: 0.6118 - val_categorical_accuracy: 0.8200 Epoch 27/100 14/14 [==============================] - ETA: 0s - loss: 0.0851 - categorical_accuracy: 0.9819 Epoch 00027: val_categorical_accuracy did not improve from 0.92000 14/14 [==============================] - 44s 3s/step - loss: 0.0851 - categorical_accuracy: 0.9819 - val_loss: 0.3732 - val_categorical_accuracy: 0.8900 Epoch 28/100 14/14 [==============================] - ETA: 0s - loss: 0.0668 - categorical_accuracy: 0.9849 Epoch 00028: val_categorical_accuracy did not improve from 0.92000 Epoch 00028: ReduceLROnPlateau reducing learning rate to 0.00015000000130385163. 
14/14 [==============================] - 43s 3s/step - loss: 0.0668 - categorical_accuracy: 0.9849 - val_loss: 0.3975 - val_categorical_accuracy: 0.9000 Epoch 29/100 14/14 [==============================] - ETA: 0s - loss: 0.0560 - categorical_accuracy: 0.9925 Epoch 00029: val_categorical_accuracy did not improve from 0.92000 14/14 [==============================] - 43s 3s/step - loss: 0.0560 - categorical_accuracy: 0.9925 - val_loss: 0.3848 - val_categorical_accuracy: 0.8800 Epoch 30/100 14/14 [==============================] - ETA: 0s - loss: 0.0419 - categorical_accuracy: 0.9970 Epoch 00030: val_categorical_accuracy did not improve from 0.92000 14/14 [==============================] - 44s 3s/step - loss: 0.0419 - categorical_accuracy: 0.9970 - val_loss: 0.4334 - val_categorical_accuracy: 0.8800 Epoch 31/100 14/14 [==============================] - ETA: 0s - loss: 0.0408 - categorical_accuracy: 0.9925 Epoch 00031: val_categorical_accuracy did not improve from 0.92000 14/14 [==============================] - 43s 3s/step - loss: 0.0408 - categorical_accuracy: 0.9925 - val_loss: 0.3729 - val_categorical_accuracy: 0.8900 Epoch 32/100 14/14 [==============================] - ETA: 0s - loss: 0.0320 - categorical_accuracy: 0.9985 Epoch 00032: val_categorical_accuracy did not improve from 0.92000 Epoch 00032: ReduceLROnPlateau reducing learning rate to 3.000000142492354e-05. 
14/14 [==============================] - 43s 3s/step - loss: 0.0320 - categorical_accuracy: 0.9985 - val_loss: 0.4924 - val_categorical_accuracy: 0.8500 Epoch 33/100 14/14 [==============================] - ETA: 0s - loss: 0.0340 - categorical_accuracy: 0.9985 Epoch 00033: val_categorical_accuracy did not improve from 0.92000 14/14 [==============================] - 42s 3s/step - loss: 0.0340 - categorical_accuracy: 0.9985 - val_loss: 0.3899 - val_categorical_accuracy: 0.8900 Epoch 34/100 14/14 [==============================] - ETA: 0s - loss: 0.0335 - categorical_accuracy: 0.9970 Epoch 00034: val_categorical_accuracy did not improve from 0.92000 14/14 [==============================] - 41s 3s/step - loss: 0.0335 - categorical_accuracy: 0.9970 - val_loss: 0.3834 - val_categorical_accuracy: 0.9000 Epoch 00034: early stopping
# Plot the training/validation loss and accuracy curves from the History object
modelplot(model_history)
# Load the winning checkpoint, i.e. the MobileNet + GRU model
winner_checkpoint = './MobileNet_h5/model-00015-0.00722-1.00000-0.01149-1.00000.h5'
model = load_model(winner_checkpoint)
WARNING:tensorflow:Layer gru_2 will not use cuDNN kernel since it doesn't meet the cuDNN kernel criteria. It will use generic GPU kernel as fallback when running on GPU
#model.summary()
# Sanity check: run the loaded MobileNet+GRU model on one random validation
# batch (20 frames, 120x120, normalized) and compare actual vs predicted class.
val_batch_gen = generator(val_path, val_doc, batch_size=2, num_frames=20,
                          frame_height=120, frame_width=120,
                          augment=False, normalize=True)
batch_frames, batch_labels = next(val_batch_gen)
# Ground-truth class: position of the 1 in the one-hot label vector
print('Actual Class of the Image: ', list(batch_labels[0]).index(1))
# Predicted class: argmax over the model's class probabilities
print('Predicted Class of the Image: ', np.argmax(model.predict(batch_frames), axis=1)[0])
Source path = /datasets/Project_data/val ; batch size = 2 Image Indexes: [ 0 2 3 5 6 8 9 11 12 14 15 17 18 20 21 23 24 26 27 29] Actual Class of the Image: 3 Predicted Class of the Image: 3
# Load the Conv3D checkpoint and sanity-check it on one random validation
# batch (20 frames, 120x120, no normalization).
model = load_model('./h5_Conv3D/model-00027-0.05054-0.98643-0.17316-0.94000.h5')
conv3d_gen = generator(val_path, val_doc, batch_size=2, num_frames=20,
                       frame_height=120, frame_width=120,
                       augment=False, normalize=False)
sample_data, sample_labels = next(conv3d_gen)
# Ground-truth class: position of the 1 in the one-hot label vector
print('Actual Class of the Image: ', list(sample_labels[0]).index(1))
# Predicted class: argmax over the model's class probabilities
print('Predicted Class of the Image: ', np.argmax(model.predict(sample_data), axis=1)[0])
Source path = /datasets/Project_data/val ; batch size = 2 Image Indexes: [ 0 2 3 5 6 8 9 11 12 14 15 17 18 20 21 23 24 26 27 29] Actual Class of the Image: 4 Predicted Class of the Image: 4
# Load the CNN+GRU model checkpoint for a final sanity check
model = load_model('./h5_GRU/model-00042-0.00941-0.99849-0.30157-0.91000.h5')
WARNING:tensorflow:Layer gru will not use cuDNN kernel since it doesn't meet the cuDNN kernel criteria. It will use generic GPU kernel as fallback when running on GPU WARNING:tensorflow:Layer gru_1 will not use cuDNN kernel since it doesn't meet the cuDNN kernel criteria. It will use generic GPU kernel as fallback when running on GPU
# Sanity check: run the CNN+GRU model on one random validation batch
# (16 frames, 120x120, no normalization) and compare actual vs predicted class.
gru_gen = generator(val_path, val_doc, batch_size=2, num_frames=16,
                    frame_height=120, frame_width=120,
                    augment=False, normalize=False)
gru_data, gru_labels = next(gru_gen)
# Ground-truth class: position of the 1 in the one-hot label vector
print('Actual Class of the Image: ', list(gru_labels[0]).index(1))
# Predicted class: argmax over the model's class probabilities
print('Predicted Class of the Image: ', np.argmax(model.predict(gru_data), axis=1)[0])
Source path = /datasets/Project_data/val ; batch size = 2 Image Indexes: [ 0 2 4 6 8 10 12 14 15 17 19 21 23 25 27 29] Actual Class of the Image: 2 Predicted Class of the Image: 2
The model has correctly predicted the class of the randomly chosen validation sample.
This concludes our experimentation.